hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
37e068d75868e79c883a445cae686d62c7b7a6fe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/**
 * Seeds one hiprandState_t per thread so later kernels can draw independent
 * random streams.
 *
 * @param seed   seed shared by every thread (e.g. the current time, passed
 *               in from the CPU)
 * @param states device array of RNG states, one slot per thread; slot
 *               threadIdx.x is written by this kernel
 */
__global__ void init(unsigned int seed, hiprandState_t* states)
{
    // Same seed everywhere, but a distinct sequence number per thread
    // (the thread index), so each thread still gets its own stream.
    // Offset 0: start at the beginning of the sequence.
    hiprand_init(seed, threadIdx.x, 0, &states[threadIdx.x]);
}
37e068d75868e79c883a445cae686d62c7b7a6fe.cu
#include "includes.h"

/**
 * Seeds one curandState_t per thread so later kernels can draw independent
 * random streams.
 *
 * @param seed   seed shared by every thread (e.g. the current time, passed
 *               in from the CPU)
 * @param states device array of RNG states, one slot per thread; slot
 *               threadIdx.x is written by this kernel
 */
__global__ void init(unsigned int seed, curandState_t* states)
{
    // Same seed everywhere, but a distinct sequence number per thread
    // (the thread index), so each thread still gets its own stream.
    // Offset 0: start at the beginning of the sequence.
    curand_init(seed, threadIdx.x, 0, &states[threadIdx.x]);
}
fb2ad87be5d40f06c1389de96f76751cd14fa509.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * CUDA-implemented utility functions & kernels needed by the neural net
 * @author Aadyot Bhatngar
 * @date April 22, 2018
 */

#include "utils.cuh"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include "helper_cuda.h"

// CUDA block width
#define BW 1024

/**
 * Sets all entries in a device buffer of type T equal to a specified value.
 * (hipMemset is byte-wise, so a typed fill is required for non-zero values.)
 */
template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals)
{
    thrust::device_ptr<T> thrust_dev_ptr(dev_ptr);
    thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val);
}

/**
 * Invokes a CUDA kernel to compute the average cross entropy between softmaxed
 * predictions pred_Y and ground truth true_Y.
 *
 * @param pred_Y predictions made by model (probability vectors)
 * @param true_Y true output values (one-hot vectors)
 * @param n number of predictions
 * @param c number of channels per prediction
 * @param h height of each prediction
 * @param w width of each prediction
 *
 * @return cross-entropy loss between pred_Y and true_Y, averaged over n
 */
float CrossEntropyLoss(float* pred_Y, float* true_Y, int n, int c, int h, int w)
{
    // Initialize loss accumulator on the device to zero
    float loss, *d_loss;
    CUDA_CALL( hipMalloc(&d_loss, sizeof(float)) );
    cudaMemsetType<float>(d_loss, 0.0f, 1);

    // Accumulate the total loss on the device by invoking a kernel
    // (grid capped at 65535 blocks; the kernel grid-strides over the rest)
    int n_blocks = ::min(65535, (n * c * h * w + BW - 1) / BW);
    hipLaunchKernelGGL(( CrossEntropyKernel), dim3(n_blocks), dim3(BW),
        BW * sizeof(float), 0, pred_Y, true_Y, d_loss, n, c, h, w);

    // Copy the accumulated loss on the device back to the host
    // (hipMemcpy synchronizes with the kernel on the null stream)
    CUDA_CALL( hipMemcpy(&loss, d_loss, sizeof(float), hipMemcpyDeviceToHost) );
    CUDA_CALL( hipFree(d_loss) );

    // Return the average loss
    return loss;
}

/**
 * Invokes a CUDA kernel to compute the average accuracy of softmaxed
 * predictions pred_Y, given ground truth true_Y.
 *
 * @param pred_Y predictions made by model (probability vectors)
 * @param true_Y true output values (one-hot vectors)
 * @param n number of predictions
 * @param c number of channels per prediction
 * @param h height of each prediction
 * @param w width of each prediction
 *
 * @return proportion of n for which the maximum entry in pred_Y (most probable
 *         class predicted) is the same as the one entry in true_Y (true class)
 */
float SoftThresholdAccuracy(float* pred_Y, float* true_Y,
    int n, int c, int h, int w)
{
    // Initialize the accuracy accumulator on the device to zero
    float acc, *d_acc;
    CUDA_CALL( hipMalloc(&d_acc, sizeof(float)) );
    cudaMemsetType<float>(d_acc, 0.0f, 1);

    // Accumulate the total number of correct predictions on the device
    int n_blocks = ::min(65535, (n * c * h * w + BW - 1) / BW);
    hipLaunchKernelGGL(( SoftThresholdAccKernel), dim3(n_blocks), dim3(BW),
        BW * sizeof(float), 0, pred_Y, true_Y, d_acc, n, c, h, w);

    // Copy the accumulated accuracy on the device back to the host
    CUDA_CALL(hipMemcpy(&acc, d_acc, sizeof(float), hipMemcpyDeviceToHost));
    CUDA_CALL(hipFree(d_acc));

    // Return the average accuracy
    return acc / static_cast<float>(n);
}

/**
 * Kernel to compute cross-entropy between pred_Y and true_Y as described by
 * {\link CrossEntropyLoss}, i.e. -sum( log(pred_Y[i]) * true_Y[i] ) / n for
 * i in [0, n*c*h*w). Expects BW threads per block and BW * sizeof(float)
 * bytes of dynamic shared memory.
 */
__global__ void CrossEntropyKernel(float* pred_Y, float* true_Y, float *loss,
    int n, int c, int h, int w)
{
    extern __shared__ float shmem[];

    const int local_tid = threadIdx.x;
    const int total = n * c * h * w;

    // Grid-stride accumulation into a per-thread partial sum. Starting from
    // 0.0f (instead of an unguarded first read, as the original code did)
    // keeps threads whose first index is already >= total from reading out
    // of bounds.
    float partial = 0.0f;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < total;
         i += gridDim.x * blockDim.x)
        partial += logf(pred_Y[i]) * true_Y[i];
    shmem[local_tid] = partial;
    __syncthreads();

    // Parallel tree reduction over shared memory (BW is a power of two).
    // Fix: the reduction must combine shmem entries; the original re-read
    // pred_Y/true_Y from global memory with block-local indices here.
    for (int s = blockDim.x / 2; s > 0; s /= 2) {
        if (local_tid < s)
            shmem[local_tid] += shmem[local_tid + s];
        __syncthreads();
    }

    // Negate so the result is the (positive) cross-entropy stated in the
    // contract above, average over n, and atomically fold this block's
    // contribution into the global accumulator.
    if (local_tid == 0)
        atomicAdd(loss, -shmem[0] / static_cast<float>(n));
}

/**
 * Kernel to compute accuracy of pred_Y given ground truth true_Y as described
 * by {\link SoftThresholdAccuracy}. Accumulates into *acc the COUNT of
 * predictions whose argmax matches the ground truth's argmax; the caller
 * divides by n. Expects BW threads/block and BW * sizeof(float) shared bytes.
 */
__global__ void SoftThresholdAccKernel(float* pred_Y, float* true_Y, float* acc,
    int n, int c, int h, int w)
{
    extern __shared__ float shmem[];
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned tid = threadIdx.x;

    // Each thread counts correct predictions over a grid-strided subset of
    // the n prediction vectors.
    shmem[tid] = 0.0;
    for (; idx < n; idx += blockDim.x * gridDim.x) {
        unsigned idx_cur = idx * c * h * w;

        // Determine which component/element of the current prediction vector
        // and of its corresponding ground truth is largest
        unsigned argmax_pred = 0, argmax_true = 0;
        for (unsigned j = 0; j < c * h * w; ++j) {
            if (pred_Y[idx_cur + argmax_pred] < pred_Y[idx_cur + j])
                argmax_pred = j;
            if (true_Y[idx_cur + argmax_true] < true_Y[idx_cur + j])
                argmax_true = j;
        }

        // If we were correct, add 1 to the accuracy count
        if (argmax_pred == argmax_true)
            shmem[tid] += 1.0;
    }
    __syncthreads();

    // Tree reduction to sum this block's per-thread counts (BW is a power
    // of two, so halving never strands an element)
    for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            shmem[tid] += shmem[tid + s];
        __syncthreads();
    }

    // Atomically add this block's count into the global accumulator
    if (tid == 0)
        atomicAdd(acc, shmem[tid]);
}
fb2ad87be5d40f06c1389de96f76751cd14fa509.cu
/**
 * CUDA-implemented utility functions & kernels needed by the neural net
 * @author Aadyot Bhatngar
 * @date April 22, 2018
 */

#include "utils.cuh"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include "helper_cuda.h"

// CUDA block width
#define BW 1024

/**
 * Sets all entries in a device buffer of type T equal to a specified value.
 * (cudaMemset is byte-wise, so a typed fill is required for non-zero values.)
 */
template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals)
{
    thrust::device_ptr<T> thrust_dev_ptr(dev_ptr);
    thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val);
}

/**
 * Invokes a CUDA kernel to compute the average cross entropy between softmaxed
 * predictions pred_Y and ground truth true_Y.
 *
 * @param pred_Y predictions made by model (probability vectors)
 * @param true_Y true output values (one-hot vectors)
 * @param n number of predictions
 * @param c number of channels per prediction
 * @param h height of each prediction
 * @param w width of each prediction
 *
 * @return cross-entropy loss between pred_Y and true_Y, averaged over n
 */
float CrossEntropyLoss(float* pred_Y, float* true_Y, int n, int c, int h, int w)
{
    // Initialize loss accumulator on the device to zero
    float loss, *d_loss;
    CUDA_CALL( cudaMalloc(&d_loss, sizeof(float)) );
    cudaMemsetType<float>(d_loss, 0.0f, 1);

    // Accumulate the total loss on the device by invoking a kernel
    // (grid capped at 65535 blocks; the kernel grid-strides over the rest)
    int n_blocks = std::min(65535, (n * c * h * w + BW - 1) / BW);
    CrossEntropyKernel<<<n_blocks, BW, BW * sizeof(float)>>>(pred_Y, true_Y,
        d_loss, n, c, h, w);

    // Copy the accumulated loss on the device back to the host
    // (cudaMemcpy synchronizes with the kernel on the null stream)
    CUDA_CALL( cudaMemcpy(&loss, d_loss, sizeof(float), cudaMemcpyDeviceToHost) );
    CUDA_CALL( cudaFree(d_loss) );

    // Return the average loss
    return loss;
}

/**
 * Invokes a CUDA kernel to compute the average accuracy of softmaxed
 * predictions pred_Y, given ground truth true_Y.
 *
 * @param pred_Y predictions made by model (probability vectors)
 * @param true_Y true output values (one-hot vectors)
 * @param n number of predictions
 * @param c number of channels per prediction
 * @param h height of each prediction
 * @param w width of each prediction
 *
 * @return proportion of n for which the maximum entry in pred_Y (most probable
 *         class predicted) is the same as the one entry in true_Y (true class)
 */
float SoftThresholdAccuracy(float* pred_Y, float* true_Y,
    int n, int c, int h, int w)
{
    // Initialize the accuracy accumulator on the device to zero
    float acc, *d_acc;
    CUDA_CALL( cudaMalloc(&d_acc, sizeof(float)) );
    cudaMemsetType<float>(d_acc, 0.0f, 1);

    // Accumulate the total number of correct predictions on the device
    int n_blocks = std::min(65535, (n * c * h * w + BW - 1) / BW);
    SoftThresholdAccKernel<<<n_blocks, BW, BW * sizeof(float)>>>(pred_Y, true_Y,
        d_acc, n, c, h, w);

    // Copy the accumulated accuracy on the device back to the host
    CUDA_CALL(cudaMemcpy(&acc, d_acc, sizeof(float), cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaFree(d_acc));

    // Return the average accuracy
    return acc / static_cast<float>(n);
}

/**
 * Kernel to compute cross-entropy between pred_Y and true_Y as described by
 * {\link CrossEntropyLoss}, i.e. -sum( log(pred_Y[i]) * true_Y[i] ) / n for
 * i in [0, n*c*h*w). Expects BW threads per block and BW * sizeof(float)
 * bytes of dynamic shared memory.
 */
__global__ void CrossEntropyKernel(float* pred_Y, float* true_Y, float *loss,
    int n, int c, int h, int w)
{
    extern __shared__ float shmem[];

    const int local_tid = threadIdx.x;
    const int total = n * c * h * w;

    // Grid-stride accumulation into a per-thread partial sum. Starting from
    // 0.0f (instead of an unguarded first read, as the original code did)
    // keeps threads whose first index is already >= total from reading out
    // of bounds.
    float partial = 0.0f;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < total;
         i += gridDim.x * blockDim.x)
        partial += logf(pred_Y[i]) * true_Y[i];
    shmem[local_tid] = partial;
    __syncthreads();

    // Parallel tree reduction over shared memory (BW is a power of two).
    // Fix: the reduction must combine shmem entries; the original re-read
    // pred_Y/true_Y from global memory with block-local indices here.
    for (int s = blockDim.x / 2; s > 0; s /= 2) {
        if (local_tid < s)
            shmem[local_tid] += shmem[local_tid + s];
        __syncthreads();
    }

    // Negate so the result is the (positive) cross-entropy stated in the
    // contract above, average over n, and atomically fold this block's
    // contribution into the global accumulator.
    if (local_tid == 0)
        atomicAdd(loss, -shmem[0] / static_cast<float>(n));
}

/**
 * Kernel to compute accuracy of pred_Y given ground truth true_Y as described
 * by {\link SoftThresholdAccuracy}. Accumulates into *acc the COUNT of
 * predictions whose argmax matches the ground truth's argmax; the caller
 * divides by n. Expects BW threads/block and BW * sizeof(float) shared bytes.
 */
__global__ void SoftThresholdAccKernel(float* pred_Y, float* true_Y, float* acc,
    int n, int c, int h, int w)
{
    extern __shared__ float shmem[];
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned tid = threadIdx.x;

    // Each thread counts correct predictions over a grid-strided subset of
    // the n prediction vectors.
    shmem[tid] = 0.0;
    for (; idx < n; idx += blockDim.x * gridDim.x) {
        unsigned idx_cur = idx * c * h * w;

        // Determine which component/element of the current prediction vector
        // and of its corresponding ground truth is largest
        unsigned argmax_pred = 0, argmax_true = 0;
        for (unsigned j = 0; j < c * h * w; ++j) {
            if (pred_Y[idx_cur + argmax_pred] < pred_Y[idx_cur + j])
                argmax_pred = j;
            if (true_Y[idx_cur + argmax_true] < true_Y[idx_cur + j])
                argmax_true = j;
        }

        // If we were correct, add 1 to the accuracy count
        if (argmax_pred == argmax_true)
            shmem[tid] += 1.0;
    }
    __syncthreads();

    // Tree reduction to sum this block's per-thread counts (BW is a power
    // of two, so halving never strands an element)
    for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            shmem[tid] += shmem[tid + s];
        __syncthreads();
    }

    // Atomically add this block's count into the global accumulator
    if (tid == 0)
        atomicAdd(acc, shmem[tid]);
}
838e7aadd69933685c640bfe1ead6279778d4b51.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <inttypes.h>
#include <rocm_smi/rocm_smi.h>
#include <assert.h>

#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )

#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif

// Fill a device array with `value`, one element per thread.
// NOTE(review): no bounds guard — the launch in initialize_array rounds the
// grid up, so the tail block writes past `size`; callers must over-allocate
// or the grid must divide evenly. TODO confirm intended by the generator.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
  int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
  input[loc] = value;
}

// Host-side helper: launch __kernel_init__ over `size` elements.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
  dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
  dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid), dim3(init_block), 0, 0, d_input, value);
}

void Check_CUDA_Error(const char* message);

/* Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];

/* Device code Begin */

// One smoothing stage: reads tile[x-1], tile[x], tile[x+1], folds the
// "bottom" and "mid" 3-point stencils into the per-thread accumulators, and
// returns the "top" stencil value, which the caller adds into the next
// pipeline buffer. Evaluation order matches the generated code exactly:
// ((k0*l + k1*c) + k2*r) / 118, so results are bit-identical.
static __device__ __forceinline__
double __forma_stage(const double* tile, int x, double& bot, double& mid)
{
  double l = tile[x-1], c = tile[x], r = tile[x+1];
  bot += (7 * l + 5 * c + 9 * r) / 118;     // bottom stencil
  mid += (12 * l + 15 * c + 12 * r) / 118;  // mid stencil
  return (9 * l + 5 * c + 7 * r) / 118;     // top stencil
}

// 4-deep software-pipelined 2-D stencil over an N x M row-major grid.
// Dynamic shared memory: 4 * FORMA_BLOCKDIM_X doubles (see
// __blockSizeToSMemSize___kernel___forma_kernel__0__). Blocks overlap by 8
// columns (one halo cell per side per stage).
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input,
    int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y,
    double * __restrict__ __var_1__)
{
  // Carve four pipeline buffers out of dynamic shared memory.
  int __FORMA_SHARED_MEM_OFFSET__ = 0;
  double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
  double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
  double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;
  double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X;

  // Per-thread "top" (t*) and "bottom" (b*) carries for each pipeline depth.
  double t2 = 0.0, t3 = 0.0, t4 = 0.0, t5 = 0.0, out = 0.0;
  double b2 = 0.0, b3 = 0.0, b4 = 0.0, b5 = 0.0;

  int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
  int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
  int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x);
  int x = __iter_3__ - __iter_0__;  // column within this block's tile

  // Stage i is valid only on columns with a full halo at that depth.
#define __FORMA_IN_RANGE__(i) \
  (__iter_3__ >= FORMA_MAX((__iter_0__+(i)),1) && \
   __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-((i)+1)),(M-2)))

  // Initialize the carry buffers.
  if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
    __tilevar_1__[x] = 0.0;
    __tilevar_2__[x] = 0.0;
    __tilevar_3__[x] = 0.0;
  }

  // Warm-up loop: fill the 4-deep pipeline; no output is produced yet.
  for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) {
    if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)))
      __tilevar_0__[x] = input[__iter_3__+M*(__iter_1__)];
    __syncthreads();
    if (__FORMA_IN_RANGE__(1)) __tilevar_1__[x] += __forma_stage(__tilevar_0__, x, t2, b2);
    __syncthreads();
    if (__FORMA_IN_RANGE__(2)) __tilevar_2__[x] += __forma_stage(__tilevar_1__, x, t3, b3);
    __syncthreads();
    if (__FORMA_IN_RANGE__(3)) __tilevar_3__[x] += __forma_stage(__tilevar_2__, x, t4, b4);
    __syncthreads();
    if (__FORMA_IN_RANGE__(4)) out += __forma_stage(__tilevar_3__, x, t5, b5);
    __syncthreads();
    // Rotate the pipeline one row: bottom carry -> buffer, top -> bottom.
    __tilevar_1__[x] = b2; b2 = t2; t2 = 0.0;
    __tilevar_2__[x] = b3; b3 = t3; t3 = 0.0;
    __tilevar_3__[x] = b4; b4 = t4; t4 = 0.0;
    out = b5; b5 = t5; t5 = 0.0;
  }

  // Steady state: each new input row yields one output row, 4 rows behind.
  __syncthreads();
  for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) {
    if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)))
      __tilevar_0__[x] = input[__iter_3__+M*(__iter_1__)];
    __syncthreads();
    if (__FORMA_IN_RANGE__(1)) __tilevar_1__[x] += __forma_stage(__tilevar_0__, x, t2, b2);
    __syncthreads();
    if (__FORMA_IN_RANGE__(2)) __tilevar_2__[x] += __forma_stage(__tilevar_1__, x, t3, b3);
    __syncthreads();
    if (__FORMA_IN_RANGE__(3)) __tilevar_3__[x] += __forma_stage(__tilevar_2__, x, t4, b4);
    __syncthreads();
    if (__FORMA_IN_RANGE__(4)) {
      out += __forma_stage(__tilevar_3__, x, t5, b5);
      __var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out;
    }
    __syncthreads();
    // Rotate the pipeline one row: bottom carry -> buffer, top -> bottom.
    __tilevar_1__[x] = b2; b2 = t2; t2 = 0.0;
    __tilevar_2__[x] = b3; b3 = t3; t3 = 0.0;
    __tilevar_3__[x] = b4; b4 = t4; t4 = 0.0;
    out = b5; b5 = t5; t5 = 0.0;
  }
#undef __FORMA_IN_RANGE__
}

// Dynamic shared-memory requirement for the kernel above: 4 row buffers of
// blockDim.x doubles each.
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  int FORMA_BLOCKDIM_X = (int)(blockDim.x);
  int SMemSize = 0;
  SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
  return SMemSize;
}
/* Device code End */

/* Host Code Begin */
// Runs the generated stencil kernel 1000 times over an N x M grid, reporting
// the change in average GPU power draw, and copies the result to __var_0__.
// h_input / __var_0__ may be host or device pointers (detected at runtime).
extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){
  /* Host allocation Begin */
  double * input;
  hipMalloc(&input,sizeof(double)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : input\n");
  hipPointerAttribute_t ptrAttrib_h_input;
  hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
  if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
    if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
      memcpy_kind_h_input = hipMemcpyDeviceToDevice;
  hipGetLastError();  // clear any error left by the attribute query
  if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
    hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
  }
  double * __var_1__;
  hipMalloc(&__var_1__,sizeof(double)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : __var_1__\n");
  /* Host Allocation End */

  /* Kernel Launch Begin */
  int __FORMA_MAX_SHARED_MEM__;
  hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
  hipEvent_t _forma_timer_start_,_forma_timer_stop_;
  hipEventCreate(&_forma_timer_start_);
  hipEventCreate(&_forma_timer_stop_);
  hipEventRecord(_forma_timer_start_,0);
#endif
  int __size_0___kernel___forma_kernel__0__ = M;
  int __size_1___kernel___forma_kernel__0__ = N;
  int __block_0___kernel___forma_kernel__0__ = 128;
  int __block_1___kernel___forma_kernel__0__ = 1;
  dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
  int __SMemSize___kernel___forma_kernel__0__ = 0;
  __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
  // -8: blocks overlap by the 8 halo columns consumed by the 4 stages.
  int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
  int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/64);
  dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);

  // Power measurement via ROCm SMI. Fix: the hipify pass rewrote the
  // <nvml.h> include to <rocm_smi/rocm_smi.h> but left the NVML calls
  // (nvmlInit / nvmlDeviceGetPowerUsage / nvmlShutdown) and the
  // nvmlEnableState_t type in place, so the file could not compile against
  // ROCm; replaced with the ROCm SMI equivalents. Power is reported in
  // microwatts by rsmi_dev_power_ave_get.
  uint64_t power1 = 0, power2 = 0;
  rsmi_status_t result;
  uint32_t device = 0;
  result = rsmi_init(0);
  assert(RSMI_STATUS_SUCCESS == result);
  result = rsmi_dev_power_ave_get(device, 0, &power1);
  assert(RSMI_STATUS_SUCCESS == result);
  hipDeviceSynchronize();
  for (int x=0; x<1000; x++) {
    hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/64, __var_1__);
    Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
  }
  hipDeviceSynchronize();
  result = rsmi_dev_power_ave_get(device, 0, &power2);
  assert(RSMI_STATUS_SUCCESS == result);
  power2 -= power1;
  printf("%" PRIu64 "\n", power2);
  rsmi_shut_down();

  // Copy the result out (device-to-device if __var_0__ is a device pointer).
  hipPointerAttribute_t ptrAttrib___var_0__;
  hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
  if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
    if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
      memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
  hipGetLastError();  // clear any error left by the attribute query
  hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
  hipEventRecord(_forma_timer_stop_,0);
  hipEventSynchronize(_forma_timer_stop_);
  float elapsedTime;
  hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
  printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
  hipEventDestroy(_forma_timer_start_);
  hipEventDestroy(_forma_timer_stop_);
#endif
  /*Kernel Launch End */

  /* Host Free Begin */
  hipFree(input);
  hipFree(__var_1__);
}
/*Host Free End*/
838e7aadd69933685c640bfe1ead6279778d4b51.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #include <nvml.h> #include <assert.h> #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X; double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X; double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X; double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*FORMA_BLOCKDIM_X; double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f, out = 0.0f; double 
b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8); int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)); // Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_1__[__iter_3__-__iter_0__] = 0.0f; __tilevar_2__[__iter_3__-__iter_0__] = 0.0f; __tilevar_3__[__iter_3__-__iter_0__] = 0.0f; } // Initial loop for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)]; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) { // Bottom double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t2 += __temp_10__; // Mid double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b2 += __temp_22__; // Top double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_1__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= 
FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) { // Bottom double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t3 += __temp_10__; // Mid double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b3 += __temp_22__; // Top double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_2__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) { // Bottom double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t4 += __temp_10__; // Mid double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b4 += __temp_22__; // Top double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_29__ = 
(__tilevar_2__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_3__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) { // Bottom double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t5 += __temp_10__; // Mid double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b5 += __temp_22__; // Top double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; out += __temp_34__; } __syncthreads (); // Now rotate __tilevar_1__[__iter_3__-__iter_0__] = b2; b2 = t2; t2 = 0.0f; __tilevar_2__[__iter_3__-__iter_0__] = b3; b3 = t3; t3 = 0.0f; __tilevar_3__[__iter_3__-__iter_0__] = b4; b4 = t4; t4 = 0.0f; out= b5; b5 = t5; t5 = 0.0f; } // Rest of the computation __syncthreads (); for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) { if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)]; } __syncthreads (); if(__iter_3__ >= 
FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))) { // Bottom double __temp_2__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t2 += __temp_10__; // Mid double __temp_13__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b2 += __temp_22__; // Top double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_1__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))) { // Bottom double __temp_2__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t3 += __temp_10__; // Mid double __temp_13__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b3 += __temp_22__; // Top double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]); double 
__temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_2__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))) { // Bottom double __temp_2__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t4 += __temp_10__; // Mid double __temp_13__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_17__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b4 += __temp_22__; // Top double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; __tilevar_3__[__iter_3__-__iter_0__] += __temp_34__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2))) { // Bottom double __temp_2__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__); double __temp_9__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_10__ = (__temp_6__ + 9 * __temp_9__) / 118; t5 += __temp_10__; // Mid double __temp_13__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_17__ = 
(__tilevar_3__[__iter_3__-__iter_0__]); double __temp_18__ = (12 * __temp_13__ + 15 * __temp_17__); double __temp_21__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_22__ = (__temp_18__ + 12 * __temp_21__) / 118; b5 += __temp_22__; // Top double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]); double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_30__ = (9 * __temp_25__ + 5 * __temp_29__); double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_34__ = (__temp_30__ + 7 * __temp_33__) / 118; out += __temp_34__; __var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out; } __syncthreads (); // Now rotate __tilevar_1__[__iter_3__-__iter_0__] = b2; b2 = t2; t2 = 0.0f; __tilevar_2__[__iter_3__-__iter_0__] = b3; b3 = t3; t3 = 0.0f; __tilevar_3__[__iter_3__-__iter_0__] = b4; b4 = t4; t4 = 0.0f; out= b5; b5 = t5; t5 = 0.0f; } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){ /* Host allocation Begin */ double * input; cudaMalloc(&input,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input); } double * __var_1__; cudaMalloc(&__var_1__,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = M; int __size_1___kernel___forma_kernel__0__ = N; int __block_0___kernel___forma_kernel__0__ = 128; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/64); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); unsigned int power1, power2; nvmlReturn_t result; nvmlDevice_t device; nvmlEnableState_t mode; result=nvmlInit(); result = nvmlDeviceGetHandleByIndex(0, &device); assert(NVML_SUCCESS == result); result=nvmlDeviceGetPowerManagementMode(device, &mode); printf("enabled = %d\n", mode); result=nvmlDeviceGetPowerUsage(device,&power1); assert(NVML_SUCCESS == result); cudaDeviceSynchronize(); for (int x=0; x<1000; x++) { __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/64, __var_1__); 
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); } cudaDeviceSynchronize(); result=nvmlDeviceGetPowerUsage(device,&power2); assert(NVML_SUCCESS == result); power2 -= power1; printf("%u\n", power2); nvmlShutdown(); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); } /*Host Free End*/
fc427bc525c5493a2bb0f463e8af0e9d36bd0803.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "codegen/embedding_backward_template_helpers.cuh" using namespace at; using namespace fbgemm_gpu; enum class BoundsCheckMode { FATAL = 0, WARNING = 1, IGNORE = 2, }; DEVICE_INLINE int64_t gpuAtomicIncrement(int64_t* p) { static_assert( sizeof(int64_t) == sizeof(unsigned long long), "expected int64_t to be unsigned long long"); return static_cast<int64_t>(atomicAdd( reinterpret_cast<unsigned long long int*>(p), static_cast<unsigned long long int>(1))); } template <typename index_t> __global__ void bounds_check_indices_kernel( const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> rows_per_table, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, int64_t bounds_check_mode_, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> warning, FixedDivisor fd) { int32_t T = rows_per_table.size(0); int32_t B = (offsets.size(0) - 1) / T; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b; // = b_t % B; int32_t t; // = b_t / B; fd.DivMod(b_t, &t, &b); if (t >= T) { return; } auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); auto num_rows = rows_per_table[t]; auto indices_start = offsets[t * B + b]; auto indices_end = offsets[t * B + b + 1]; auto L = indices_end - indices_start; for (auto i = threadIdx.x; i < L; i += fbgemm_gpu::kWarpSize) { auto idx = indices[indices_start + i]; if (idx == -1) { // -1 indicates pruned rows. 
continue; } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(idx >= 0 && "Failed idx >= 0 in bounds_check_indices"); CUDA_KERNEL_ASSERT(idx < num_rows && "Failed idx < num_rows in bounds_check_indices"); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (idx < 0 || idx >= num_rows) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck: (at least one) Out of bounds access for batch: %lld, table: %lld, bag element: %lld, idx: %lld, num_rows: %lld. Setting idx to zero.\n", int64_t(b), int64_t(t), int64_t(i), int64_t(idx), num_rows); } indices[indices_start + i] = 0; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (idx < 0 || idx >= num_rows) { indices[indices_start + i] = 0; } } } } void bounds_check_indices_cuda( Tensor rows_per_table, Tensor indices, Tensor offsets, int64_t bounds_check_mode_, Tensor warning) { int32_t T = rows_per_table.size(0); int32_t B = (offsets.size(0) - 1) / T; if (B == 0 || T == 0) { return; } auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); if (bounds_check_mode == BoundsCheckMode::WARNING) { warning.zero_(); } constexpr size_t kNumThreads = 256; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "bounds_check_indices", [&]() { hipLaunchKernelGGL(( bounds_check_indices_kernel<index_t>) , dim3(div_round_up(B * T, kNumThreads / fbgemm_gpu::kWarpSize)), dim3(dim3(fbgemm_gpu::kWarpSize, kNumThreads / fbgemm_gpu::kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), rows_per_table .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), bounds_check_mode_, warning.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), FixedDivisor(B)); }); C10_HIP_KERNEL_LAUNCH_CHECK(); }
fc427bc525c5493a2bb0f463e8af0e9d36bd0803.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "codegen/embedding_backward_template_helpers.cuh" using namespace at; using namespace fbgemm_gpu; enum class BoundsCheckMode { FATAL = 0, WARNING = 1, IGNORE = 2, }; DEVICE_INLINE int64_t gpuAtomicIncrement(int64_t* p) { static_assert( sizeof(int64_t) == sizeof(unsigned long long), "expected int64_t to be unsigned long long"); return static_cast<int64_t>(atomicAdd( reinterpret_cast<unsigned long long int*>(p), static_cast<unsigned long long int>(1))); } template <typename index_t> __global__ void bounds_check_indices_kernel( const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> rows_per_table, at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> indices, const at::PackedTensorAccessor32<index_t, 1, at::RestrictPtrTraits> offsets, int64_t bounds_check_mode_, at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> warning, FixedDivisor fd) { int32_t T = rows_per_table.size(0); int32_t B = (offsets.size(0) - 1) / T; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b; // = b_t % B; int32_t t; // = b_t / B; fd.DivMod(b_t, &t, &b); if (t >= T) { return; } auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); auto num_rows = rows_per_table[t]; auto indices_start = offsets[t * B + b]; auto indices_end = offsets[t * B + b + 1]; auto L = indices_end - indices_start; for (auto i = threadIdx.x; i < L; i += fbgemm_gpu::kWarpSize) { auto idx = indices[indices_start + i]; if (idx == -1) { // -1 indicates pruned rows. 
continue; } if (bounds_check_mode == BoundsCheckMode::FATAL) { CUDA_KERNEL_ASSERT(idx >= 0 && "Failed idx >= 0 in bounds_check_indices"); CUDA_KERNEL_ASSERT(idx < num_rows && "Failed idx < num_rows in bounds_check_indices"); } else if (bounds_check_mode == BoundsCheckMode::WARNING) { if (idx < 0 || idx >= num_rows) { if (gpuAtomicIncrement(&warning[0]) == 0) { printf( "EmbeddingBoundsCheck: (at least one) Out of bounds access for batch: %lld, table: %lld, bag element: %lld, idx: %lld, num_rows: %lld. Setting idx to zero.\n", int64_t(b), int64_t(t), int64_t(i), int64_t(idx), num_rows); } indices[indices_start + i] = 0; } } else if (bounds_check_mode == BoundsCheckMode::IGNORE) { if (idx < 0 || idx >= num_rows) { indices[indices_start + i] = 0; } } } } void bounds_check_indices_cuda( Tensor rows_per_table, Tensor indices, Tensor offsets, int64_t bounds_check_mode_, Tensor warning) { int32_t T = rows_per_table.size(0); int32_t B = (offsets.size(0) - 1) / T; if (B == 0 || T == 0) { return; } auto bounds_check_mode = static_cast<BoundsCheckMode>(bounds_check_mode_); if (bounds_check_mode == BoundsCheckMode::WARNING) { warning.zero_(); } constexpr size_t kNumThreads = 256; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "bounds_check_indices", [&]() { bounds_check_indices_kernel<index_t> <<<div_round_up(B * T, kNumThreads / fbgemm_gpu::kWarpSize), dim3(fbgemm_gpu::kWarpSize, kNumThreads / fbgemm_gpu::kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( rows_per_table .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), indices.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), offsets.packed_accessor32<index_t, 1, at::RestrictPtrTraits>(), bounds_check_mode_, warning.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(), FixedDivisor(B)); }); C10_CUDA_KERNEL_LAUNCH_CHECK(); }
27cd3d95c1d77c98270a914489c66f8f89518863.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> using namespace std; #define N 20 #define BLOCK_DIM 10 void random_inits(int a[N][N]){ for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ a[i][j] = rand() % 10; } } } __global__ void add(int a[N][N], int b[N][N], int c[N][N]){ int i = threadIdx.x; int j = threadIdx.y; c[i][j] = a[i][j] + b[i][j]; } void show(int a[N][N], int b[N][N], int c[N][N]){ for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t"; } cout << endl; } } int main(void){ int a[N][N]; int b[N][N]; int c[N][N]; random_inits(a); random_inits(b); int (*d_a)[N], (*d_b)[N], (*d_c)[N]; hipMalloc((void**)&d_a, (N*N)*sizeof(int)); hipMalloc((void**)&d_b, (N*N)*sizeof(int)); hipMalloc((void**)&d_c, (N*N)*sizeof(int)); hipMemcpy(d_a, a, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_c, c, (N*N)*sizeof(int), hipMemcpyHostToDevice); int numBlocks = 1; dim3 threadsPerBlock(N,N); hipLaunchKernelGGL(( add), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_a,d_b,d_c); hipMemcpy(c, d_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost); show(a, b, c); hipFree(d_a); hipFree(d_b); hipFree(d_c); cout<< endl; return 0; }
27cd3d95c1d77c98270a914489c66f8f89518863.cu
#include <iostream> #include <cuda_runtime.h> using namespace std; #define N 20 #define BLOCK_DIM 10 void random_inits(int a[N][N]){ for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ a[i][j] = rand() % 10; } } } __global__ void add(int a[N][N], int b[N][N], int c[N][N]){ int i = threadIdx.x; int j = threadIdx.y; c[i][j] = a[i][j] + b[i][j]; } void show(int a[N][N], int b[N][N], int c[N][N]){ for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t"; } cout << endl; } } int main(void){ int a[N][N]; int b[N][N]; int c[N][N]; random_inits(a); random_inits(b); int (*d_a)[N], (*d_b)[N], (*d_c)[N]; cudaMalloc((void**)&d_a, (N*N)*sizeof(int)); cudaMalloc((void**)&d_b, (N*N)*sizeof(int)); cudaMalloc((void**)&d_c, (N*N)*sizeof(int)); cudaMemcpy(d_a, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, (N*N)*sizeof(int), cudaMemcpyHostToDevice); int numBlocks = 1; dim3 threadsPerBlock(N,N); add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c); cudaMemcpy(c, d_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost); show(a, b, c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cout<< endl; return 0; }
5f53b523d210e57dbccc337bc236797601937cad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file h_correction_3D_cuda.cu * \brief Functions definitions for the H correciton kernels. Written following Sanders et al. 1998. */ #ifdef CUDA #include<cuda.h> #include<math.h> #include"global.h" #include"global_cuda.h" #include"h_correction_3D_cuda.h" /*! \fn void calc_eta_x_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_x, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_x_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_x, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // x-direction if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-2 && yid < ny-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id] + dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id] + dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id] + dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id] + dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_x[id] = 
0.5*fabs((dev_bounds_R[n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[n_cells+id]/dev_bounds_L[id] - al)); } } /*! \fn void calc_eta_y(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_y, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_y_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_y, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // y-direction if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id] + dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id] + dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id] + dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id] + dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_y[id] = 0.5*fabs((dev_bounds_R[2*n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[2*n_cells+id]/dev_bounds_L[id] - al)); } } /*! 
\fn void calc_eta_z(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_z, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_z_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_z, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // z-direction if (zid > n_ghost-2 && zid < nz-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id] + dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id] + dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id] + dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id] + dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_z[id] = 0.5*fabs((dev_bounds_R[3*n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[3*n_cells+id]/dev_bounds_L[id] - al)); } } /*! \fn void calc_etah_x_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_x, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_x_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_x, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // x-direction if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_y[xid + (yid-1)*nx + zid*nx*ny], eta_y[xid+1 + (yid-1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[id]); etah = fmax(etah, eta_y[xid+1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_z[xid + yid*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[xid+1 + yid*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah = fmax(etah, eta_z[xid+1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_x[id]); etah_x[id] = etah; } } /*! \fn void calc_etah_y_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_y, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_y_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_y, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // y-direction if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-1 && xid < nx-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_z[xid + yid*nx + (zid-1)*nx*ny], eta_z[xid + (yid+1)*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah = fmax(etah, eta_z[xid + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_x[xid-1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_x[xid-1 + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_x[id]); etah = fmax(etah, eta_x[xid + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[id]); etah_y[id] = etah; } } /*! \fn void calc_etah_z_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_z, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_z_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_z, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // z-direction if (zid > n_ghost-2 && zid < nz-n_ghost && xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_x[xid-1 + yid*nx + zid*nx*ny], eta_x[xid-1 + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_x[id]); etah = fmax(etah, eta_x[xid + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_y[xid + (yid-1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[xid + (yid-1)*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_y[id]); etah = fmax(etah, eta_y[xid + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah_z[id] = etah; } } #endif //CUDA
5f53b523d210e57dbccc337bc236797601937cad.cu
/*! \file h_correction_3D_cuda.cu * \brief Functions definitions for the H correciton kernels. Written following Sanders et al. 1998. */ #ifdef CUDA #include<cuda.h> #include<math.h> #include"global.h" #include"global_cuda.h" #include"h_correction_3D_cuda.h" /*! \fn void calc_eta_x_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_x, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_x_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_x, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // x-direction if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-2 && yid < ny-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id] + dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id] + dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id] + dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id] + dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_x[id] = 0.5*fabs((dev_bounds_R[n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[n_cells+id]/dev_bounds_L[id] - al)); } } /*! 
\fn void calc_eta_y(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_y, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_y_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_y, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // y-direction if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1 && zid > n_ghost-2 && zid < nz-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id] + dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id] + dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id] + dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id] + dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_y[id] = 0.5*fabs((dev_bounds_R[2*n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[2*n_cells+id]/dev_bounds_L[id] - al)); } } /*! 
\fn void calc_eta_z(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_z, int nx, int ny, int nz, int n_ghost, Real gamma) * \brief When passed the left and right boundary values at an interface, calculates the eta value for the interface according to the forumulation in Sanders et al, 1998. */ __global__ void calc_eta_z_3D(Real *dev_bounds_L, Real *dev_bounds_R, Real *eta_z, int nx, int ny, int nz, int n_ghost, Real gamma) { int n_cells = nx*ny*nz; // declare primative variables for each stencil // these will be placed into registers for each thread Real pl, pr, al, ar; // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; // z-direction if (zid > n_ghost-2 && zid < nz-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1 && yid > n_ghost-2 && yid < ny-n_ghost+1) { // load the interface values into registers id = xid + yid*nx + zid*nx*ny; pl = (dev_bounds_L[4*n_cells + id] - 0.5*(dev_bounds_L[3*n_cells+id]*dev_bounds_L[3*n_cells+id] + dev_bounds_L[ n_cells+id]*dev_bounds_L[ n_cells+id] + dev_bounds_L[2*n_cells+id]*dev_bounds_L[2*n_cells+id])/dev_bounds_L[id]) * (gamma - 1.0); pl = fmax(pl, (Real) 1.0e-20); pr = (dev_bounds_R[4*n_cells + id] - 0.5*(dev_bounds_R[3*n_cells+id]*dev_bounds_R[3*n_cells+id] + dev_bounds_R[ n_cells+id]*dev_bounds_R[ n_cells+id] + dev_bounds_R[2*n_cells+id]*dev_bounds_R[2*n_cells+id])/dev_bounds_R[id]) * (gamma - 1.0); pr = fmax(pr, (Real) 1.0e-20); al = sqrt(gamma*pl/dev_bounds_L[id]); ar = sqrt(gamma*pl/dev_bounds_R[id]); eta_z[id] = 0.5*fabs((dev_bounds_R[3*n_cells+id]/dev_bounds_R[id] + ar) - (dev_bounds_L[3*n_cells+id]/dev_bounds_L[id] - al)); } } /*! \fn void calc_etah_x_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_x, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_x_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_x, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // x-direction if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_y[xid + (yid-1)*nx + zid*nx*ny], eta_y[xid+1 + (yid-1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[id]); etah = fmax(etah, eta_y[xid+1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_z[xid + yid*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[xid+1 + yid*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah = fmax(etah, eta_z[xid+1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_x[id]); etah_x[id] = etah; } } /*! \fn void calc_etah_y_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_y, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_y_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_y, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // y-direction if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-1 && xid < nx-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_z[xid + yid*nx + (zid-1)*nx*ny], eta_z[xid + (yid+1)*nx + (zid-1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah = fmax(etah, eta_z[xid + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_x[xid-1 + yid*nx + zid*nx*ny]); etah = fmax(etah, eta_x[xid-1 + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_x[id]); etah = fmax(etah, eta_x[xid + (yid+1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[id]); etah_y[id] = etah; } } /*! \fn void calc_etah_z_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_z, int nx, int ny, int nz, int n_ghost) * \brief When passed the eta values at every interface, calculates the eta_h value for the interface according to the forumulation in Sanders et al, 1998. 
*/ __global__ void calc_etah_z_3D(Real *eta_x, Real *eta_y, Real *eta_z, Real *etah_z, int nx, int ny, int nz, int n_ghost) { // get a thread ID int tid = threadIdx.x + blockIdx.x*blockDim.x; int id; int zid = tid / (nx*ny); int yid = (tid - zid*nx*ny) / nx; int xid = tid - zid*nx*ny - yid*nx; Real etah; // z-direction if (zid > n_ghost-2 && zid < nz-n_ghost && xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost) { id = xid + yid*nx + zid*nx*ny; etah = fmax(eta_x[xid-1 + yid*nx + zid*nx*ny], eta_x[xid-1 + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_x[id]); etah = fmax(etah, eta_x[xid + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_y[xid + (yid-1)*nx + zid*nx*ny]); etah = fmax(etah, eta_y[xid + (yid-1)*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_y[id]); etah = fmax(etah, eta_y[xid + yid*nx + (zid+1)*nx*ny]); etah = fmax(etah, eta_z[id]); etah_z[id] = etah; } } #endif //CUDA
66fb1bec6f09abe7b759a842933838b362f16cd8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "add.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array_a = NULL; hipMalloc(&array_a, XSIZE*YSIZE); float *array_b = NULL; hipMalloc(&array_b, XSIZE*YSIZE); float *array_c = NULL; hipMalloc(&array_c, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, array_a,array_b,array_c,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, array_a,array_b,array_c,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, array_a,array_b,array_c,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
66fb1bec6f09abe7b759a842933838b362f16cd8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array_a = NULL; cudaMalloc(&array_a, XSIZE*YSIZE); float *array_b = NULL; cudaMalloc(&array_b, XSIZE*YSIZE); float *array_c = NULL; cudaMalloc(&array_c, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); add<<<gridBlock,threadBlock>>>(array_a,array_b,array_c,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { add<<<gridBlock,threadBlock>>>(array_a,array_b,array_c,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { add<<<gridBlock,threadBlock>>>(array_a,array_b,array_c,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
67a9c60663b5af2e37f6f96d8b9a72f4ad30f747.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "error.h" #include "inf_norm_cuda.h" #include <stdio.h> #include <assert.h> #include <cmath> #include <ctime> template<class T,class realT> realT cpu_inf_norm(const int size,const T arr[]){ realT inf_norm = 0.0; for(int i=0;i<size;++i){ inf_norm = ::max(inf_norm,std::abs(arr[i])); } return inf_norm; } template<class T,class realT> void test_inf_norm(int size,int niter){ unsigned int threads = (size < 2*__MAX_THREADS_INF_NORM) ? nextPow2((size + 1)/ 2) : __MAX_THREADS_INF_NORM; unsigned int blocks = (size + (threads * 2 - 1)) / (threads * 2); std::srand(std::time(nullptr)); T * hi_data = NULL; T * di_data = NULL; realT * do_data = NULL; realT result_gpu,result_cpu; cudaErrorCheck(hipHostMalloc((void**)&hi_data,size*sizeof(T))); cudaErrorCheck(hipMalloc((void**)&di_data,size*sizeof(T))); cudaErrorCheck(hipMalloc((void**)&do_data,blocks*sizeof(realT))); for(int j=0;j<niter;++j) { for(int i=0;i<size;++i){ hi_data[i] = std::rand(); } cudaErrorCheck(hipMemcpy(di_data,hi_data,size*sizeof(T),hipMemcpyHostToDevice)); inf_norm<int,realT,T>(size,di_data,do_data,&result_gpu); hipDeviceSynchronize(); result_cpu = cpu_inf_norm<T,realT>(size,hi_data); assert(result_cpu==result_gpu); } if(hi_data) cudaErrorCheck(hipHostFree(hi_data)); if(di_data) cudaErrorCheck(hipFree(di_data)); if(do_data) cudaErrorCheck(hipFree(do_data)); } int main(int argc, char const *argv[]) { test_inf_norm<double,double>(70,1000); cudaErrorCheck(hipDeviceReset()); return 0; }
67a9c60663b5af2e37f6f96d8b9a72f4ad30f747.cu
#include <cuda_runtime.h> #include "error.h" #include "inf_norm_cuda.h" #include <stdio.h> #include <assert.h> #include <cmath> #include <ctime> template<class T,class realT> realT cpu_inf_norm(const int size,const T arr[]){ realT inf_norm = 0.0; for(int i=0;i<size;++i){ inf_norm = std::max(inf_norm,std::abs(arr[i])); } return inf_norm; } template<class T,class realT> void test_inf_norm(int size,int niter){ unsigned int threads = (size < 2*__MAX_THREADS_INF_NORM) ? nextPow2((size + 1)/ 2) : __MAX_THREADS_INF_NORM; unsigned int blocks = (size + (threads * 2 - 1)) / (threads * 2); std::srand(std::time(nullptr)); T * hi_data = NULL; T * di_data = NULL; realT * do_data = NULL; realT result_gpu,result_cpu; cudaErrorCheck(cudaMallocHost((void**)&hi_data,size*sizeof(T))); cudaErrorCheck(cudaMalloc((void**)&di_data,size*sizeof(T))); cudaErrorCheck(cudaMalloc((void**)&do_data,blocks*sizeof(realT))); for(int j=0;j<niter;++j) { for(int i=0;i<size;++i){ hi_data[i] = std::rand(); } cudaErrorCheck(cudaMemcpy(di_data,hi_data,size*sizeof(T),cudaMemcpyHostToDevice)); inf_norm<int,realT,T>(size,di_data,do_data,&result_gpu); cudaDeviceSynchronize(); result_cpu = cpu_inf_norm<T,realT>(size,hi_data); assert(result_cpu==result_gpu); } if(hi_data) cudaErrorCheck(cudaFreeHost(hi_data)); if(di_data) cudaErrorCheck(cudaFree(di_data)); if(do_data) cudaErrorCheck(cudaFree(do_data)); } int main(int argc, char const *argv[]) { test_inf_norm<double,double>(70,1000); cudaErrorCheck(cudaDeviceReset()); return 0; }
708fc59bfd5af7bbcdcb0e3d5e1be5f1f7ff4f30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This is the Open Simplex Noise algorithm, ported to Enoki/CUDA. // // Original Open Simplex Algorithm: Stefan Gustavson (public domain.) // http://webstaff.itn.liu.se/~stegu/simplexnoise // // C port by Bram Stolk // https://github.com/stolk/sino // // Port to Enoki by Bram Stolk // https://github.com/stolk/osino // // License: 3-clause BSD to match Enoki License. // // osino.cpp #include <inttypes.h> #include <stdio.h> #include <assert.h> #include <hip/hip_fp16.h> #if defined(STORECHARS) #include "bluenoise.h" #include "honeycomb.h" #endif #define IMRES (1<<IMMAG) #define IMMSK (IMRES-1) // Skewing / Unskewing factors for 2, 3, and 4 dimensions. #define F2 0.3660254037844386f // 0.5*(Math.sqrt(3.0)-1.0); #define G2 0.21132486540518713f // (3.0-Math.sqrt(3.0))/6.0; #define F3 0.3333333333333333f // 1.0/3.0; #define G3 0.16666666666666666f // 1.0/6.0; #define F4 0.30901699437494745f // (Math.sqrt(5.0)-1.0)/4.0; #define G4 0.1381966011250105f // (5.0-Math.sqrt(5.0))/20.0; typedef short value_t; unsigned char pal[4][3] = { 0xff,0xff,0xff, 0x7F,0xC2,0xC2, 0xE1,0x77,0x4B, 0xF4,0xDB,0x60, }; // Hash function that we use to generate random directions. __device__ __forceinline__ int murmur(int key, uint32_t seed) { int k = ( key ^ seed ) * 0x5bd1e995; k = k ^ (k>>24); return k; } // Dot product. __device__ __forceinline__ float dot_2d( float ax, float ay, float bx, float by ) { return ax * bx + ay * by; } // Generates a random 2D direction for specified grid location. 
#define RANDOMDIR_2D(x0,y0,PRF) \ const int PRF ## hx = murmur( x0*8887+y0*7213, 0x17295179 ); \ const int PRF ## hy = murmur( x0*8887+y0*7213, 0x18732214 ); \ const int PRF ## ax = (PRF ## hx)>>16; \ const int PRF ## ay = (PRF ## hy)>>16; \ const int PRF ## bx = PRF ## hx & 0x0000ffff; \ const int PRF ## by = PRF ## hy & 0x0000ffff; \ const float PRF ## cand_a_x = PRF ## ax * (2/65536.0f) - 1; \ const float PRF ## cand_a_y = PRF ## ay * (2/65536.0f) - 1; \ const float PRF ## cand_b_x = PRF ## bx * (2/65536.0f) - 1; \ const float PRF ## cand_b_y = PRF ## by * (2/65536.0f) - 1; \ const float PRF ## lensq_a = dot_2d(PRF ## cand_a_x, PRF ## cand_a_y, PRF ## cand_a_x, PRF ## cand_a_y); \ const float PRF ## lensq_b = dot_2d(PRF ## cand_b_x, PRF ## cand_b_y, PRF ## cand_b_x, PRF ## cand_b_y); \ const float PRF ## ilen_a = rsqrtf( PRF ## lensq_a ); \ const float PRF ## ilen_b = rsqrtf( PRF ## lensq_b ); \ const auto PRF ## a_is_shorter = ( PRF ## lensq_a < PRF ## lensq_b ); \ const float PRF ## norm_a_x = ( PRF ## cand_a_x * PRF ## ilen_a ); \ const float PRF ## norm_a_y = ( PRF ## cand_a_y * PRF ## ilen_a ); \ const float PRF ## norm_b_x = ( PRF ## cand_b_x * PRF ## ilen_b ); \ const float PRF ## norm_b_y = ( PRF ## cand_b_y * PRF ## ilen_b ); \ const float PRF ## _x = PRF ## a_is_shorter ? PRF ## norm_a_x : PRF ## norm_b_x; \ const float PRF ## _y = PRF ## a_is_shorter ? PRF ## norm_a_y : PRF ## norm_b_y; \ // Open Simplex Noise 2D __device__ __noinline__ float osino_2d(float x, float y) { // Skew const float s = ( x + y ) * F2; const float flx = floorf(x+s); const float fly = floorf(y+s); const float t = (flx+fly) * G2; const int i = (int)flx; const int j = (int)fly; // Unskew const float X0 = flx - t; const float Y0 = fly - t; const float x0 = x - X0; const float y0 = y - Y0; // Determine which simplex. const int i1 = x0>y0 ? 1 : 0; const int j1 = x0>y0 ? 
0 : 1; const float x1 = x0 - i1 + G2; const float y1 = y0 - j1 + G2; const float x2 = x0 - 1.0f + 2.0f * G2; const float y2 = y0 - 1.0f + 2.0f * G2; // Generate a random direction for each corner. RANDOMDIR_2D((i ), (j ), grad0); RANDOMDIR_2D((i+i1), (j+j1), grad1); RANDOMDIR_2D((i+ 1), (j+ 1), grad2); const float t0 = 0.5f - x0*x0 - y0*y0; const float t1 = 0.5f - x1*x1 - y1*y1; const float t2 = 0.5f - x2*x2 - y2*y2; const float p0 = t0*t0*t0*t0 * dot_2d(grad0_x, grad0_y, x0, y0); const float p1 = t1*t1*t1*t1 * dot_2d(grad1_x, grad1_y, x1, y1); const float p2 = t2*t2*t2*t2 * dot_2d(grad2_x, grad2_y, x2, y2); const float n0 = t0<0 ? 0 : p0; const float n1 = t1<0 ? 0 : p1; const float n2 = t2<0 ? 0 : p2; // Add contributions from each corner and scale to [-1,1] interval. return 70.0f * ( n0 + n1 + n2 ); } // Do 4 octaves of open simplex noise in 2D. __device__ float osino_2d_4o( float x, float y ) { const float n0 = osino_2d( x, y); const float n1 = osino_2d(2*x, 2*y); const float n2 = osino_2d(4*x, 4*y); const float n3 = osino_2d(8*x, 8*y); return (1/1.875f) * ( n0 + 0.5f * n1 + 0.25f * n2 + 0.125f * n3 ); } extern "C" { __global__ void doubledomainwarp ( value_t* field, float offset_x, float offset_y, float domainwarp0, float domainwarp1, float freq ) { const int xc = threadIdx.x; const int yc = blockIdx.x & IMMSK; const float ifull = 1.0f / IMRES; const float s0 = 2.017f * ifull; const float s1 = 2.053f * ifull; float x = xc * s0; float y = yc * s1; const float w0x = osino_2d(offset_x+411+y, offset_y+423-x) * domainwarp1; const float w0y = osino_2d(offset_x+419-y, offset_y+413+x) * domainwarp1; const float w1x = osino_2d(offset_x+711-w0x, offset_y+723-w0y) * domainwarp0; const float w1y = osino_2d(offset_x-719+w0x, offset_y+713+w0y) * domainwarp0; x += w1x; y += w1y; const int idx = (yc * (IMRES)) + xc; float result = osino_2d_4o(offset_x+freq*x, offset_y+freq*y); result = result < -1 ? -1 : result; result = result > 1 ? 
1 : result; field[ idx ] = (value_t) ( result * 32767.0f ); } }// extern C __host__ void query(void) { int nDevices=-1; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); int maxthr=-1; hipDeviceGetAttribute(&maxthr, hipDeviceAttributeMaxThreadsPerBlock, i); int wrpsiz=-1; hipDeviceGetAttribute(&wrpsiz, hipDeviceAttributeWarpSize, i); fprintf(stderr, "Device Number: %d\n", i); fprintf(stderr, " Device name: %s\n", prop.name); fprintf(stderr, " Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); fprintf(stderr, " Memory Bus Width (bits): %d\n", prop.memoryBusWidth); fprintf(stderr, " Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); fprintf(stderr, " Max threads per block: %d\n", maxthr); fprintf(stderr, " Warp size: %d\n", wrpsiz); } } #define CHECK_CUDA \ { \ const hipError_t err = hipGetLastError(); \ fprintf(stderr,"%s\n", hipGetErrorString(err)); \ } __host__ int main(int argc, char* argv[]) { float opt_freq = 1.092f; float opt_warp0 = 0.1f; float opt_warp1 = 0.1f; const char* opt_out="out_doubledomainwarp.ppm"; for ( int i=1; i<argc; ++i) { if (!strncmp(argv[i],"freq=",5)) opt_freq = atof(argv[i]+5); if (!strncmp(argv[i],"warp0=",6)) opt_warp0 = atof(argv[i]+6); if (!strncmp(argv[i],"warp1=",6)) opt_warp1 = atof(argv[i]+6); if (!strncmp(argv[i],"out=",4)) opt_out = argv[i]+4; } query(); const int N = IMRES*IMRES; value_t* field = 0; hipMallocManaged(&field, N*sizeof(value_t)); assert(field); CHECK_CUDA hipLaunchKernelGGL(( doubledomainwarp), dim3(IMRES),dim3(IMRES), 0, 0, field, 14567.89f, 21123.46f, opt_warp0, opt_warp1, opt_freq ); fprintf( stderr,"warp0 %f warp1 %f", opt_warp0, opt_warp1 ); hipDeviceSynchronize(); CHECK_CUDA unsigned char im[IMRES*IMRES][3]; for ( int i=0; i<IMRES*IMRES; ++i ) { int idx = 0; const value_t v = field[i]; if ( v> 2000 && v< 4000 ) idx=1; if ( v> 5000 && v< 9000 ) idx=2; if ( v> 10000 && v<12000 ) idx=1; if ( 
v<-16000 ) idx=3; im[i][0] = pal[idx][0]; im[i][1] = pal[idx][1]; im[i][2] = pal[idx][2]; } FILE* f = fopen(opt_out,"wb"); fprintf(f, "P6\n%d %d\n255\n", IMRES, IMRES); fwrite( im, sizeof(im), 1, f ); fclose(f); hipFree(field); return 0; }
708fc59bfd5af7bbcdcb0e3d5e1be5f1f7ff4f30.cu
// This is the Open Simplex Noise algorithm, ported to Enoki/CUDA. // // Original Open Simplex Algorithm: Stefan Gustavson (public domain.) // http://webstaff.itn.liu.se/~stegu/simplexnoise // // C port by Bram Stolk // https://github.com/stolk/sino // // Port to Enoki by Bram Stolk // https://github.com/stolk/osino // // License: 3-clause BSD to match Enoki License. // // osino.cpp #include <inttypes.h> #include <stdio.h> #include <assert.h> #include <cuda_fp16.h> #if defined(STORECHARS) #include "bluenoise.h" #include "honeycomb.h" #endif #define IMRES (1<<IMMAG) #define IMMSK (IMRES-1) // Skewing / Unskewing factors for 2, 3, and 4 dimensions. #define F2 0.3660254037844386f // 0.5*(Math.sqrt(3.0)-1.0); #define G2 0.21132486540518713f // (3.0-Math.sqrt(3.0))/6.0; #define F3 0.3333333333333333f // 1.0/3.0; #define G3 0.16666666666666666f // 1.0/6.0; #define F4 0.30901699437494745f // (Math.sqrt(5.0)-1.0)/4.0; #define G4 0.1381966011250105f // (5.0-Math.sqrt(5.0))/20.0; typedef short value_t; unsigned char pal[4][3] = { 0xff,0xff,0xff, 0x7F,0xC2,0xC2, 0xE1,0x77,0x4B, 0xF4,0xDB,0x60, }; // Hash function that we use to generate random directions. __device__ __forceinline__ int murmur(int key, uint32_t seed) { int k = ( key ^ seed ) * 0x5bd1e995; k = k ^ (k>>24); return k; } // Dot product. __device__ __forceinline__ float dot_2d( float ax, float ay, float bx, float by ) { return ax * bx + ay * by; } // Generates a random 2D direction for specified grid location. 
#define RANDOMDIR_2D(x0,y0,PRF) \ const int PRF ## hx = murmur( x0*8887+y0*7213, 0x17295179 ); \ const int PRF ## hy = murmur( x0*8887+y0*7213, 0x18732214 ); \ const int PRF ## ax = (PRF ## hx)>>16; \ const int PRF ## ay = (PRF ## hy)>>16; \ const int PRF ## bx = PRF ## hx & 0x0000ffff; \ const int PRF ## by = PRF ## hy & 0x0000ffff; \ const float PRF ## cand_a_x = PRF ## ax * (2/65536.0f) - 1; \ const float PRF ## cand_a_y = PRF ## ay * (2/65536.0f) - 1; \ const float PRF ## cand_b_x = PRF ## bx * (2/65536.0f) - 1; \ const float PRF ## cand_b_y = PRF ## by * (2/65536.0f) - 1; \ const float PRF ## lensq_a = dot_2d(PRF ## cand_a_x, PRF ## cand_a_y, PRF ## cand_a_x, PRF ## cand_a_y); \ const float PRF ## lensq_b = dot_2d(PRF ## cand_b_x, PRF ## cand_b_y, PRF ## cand_b_x, PRF ## cand_b_y); \ const float PRF ## ilen_a = rsqrtf( PRF ## lensq_a ); \ const float PRF ## ilen_b = rsqrtf( PRF ## lensq_b ); \ const auto PRF ## a_is_shorter = ( PRF ## lensq_a < PRF ## lensq_b ); \ const float PRF ## norm_a_x = ( PRF ## cand_a_x * PRF ## ilen_a ); \ const float PRF ## norm_a_y = ( PRF ## cand_a_y * PRF ## ilen_a ); \ const float PRF ## norm_b_x = ( PRF ## cand_b_x * PRF ## ilen_b ); \ const float PRF ## norm_b_y = ( PRF ## cand_b_y * PRF ## ilen_b ); \ const float PRF ## _x = PRF ## a_is_shorter ? PRF ## norm_a_x : PRF ## norm_b_x; \ const float PRF ## _y = PRF ## a_is_shorter ? PRF ## norm_a_y : PRF ## norm_b_y; \ // Open Simplex Noise 2D __device__ __noinline__ float osino_2d(float x, float y) { // Skew const float s = ( x + y ) * F2; const float flx = floorf(x+s); const float fly = floorf(y+s); const float t = (flx+fly) * G2; const int i = (int)flx; const int j = (int)fly; // Unskew const float X0 = flx - t; const float Y0 = fly - t; const float x0 = x - X0; const float y0 = y - Y0; // Determine which simplex. const int i1 = x0>y0 ? 1 : 0; const int j1 = x0>y0 ? 
0 : 1; const float x1 = x0 - i1 + G2; const float y1 = y0 - j1 + G2; const float x2 = x0 - 1.0f + 2.0f * G2; const float y2 = y0 - 1.0f + 2.0f * G2; // Generate a random direction for each corner. RANDOMDIR_2D((i ), (j ), grad0); RANDOMDIR_2D((i+i1), (j+j1), grad1); RANDOMDIR_2D((i+ 1), (j+ 1), grad2); const float t0 = 0.5f - x0*x0 - y0*y0; const float t1 = 0.5f - x1*x1 - y1*y1; const float t2 = 0.5f - x2*x2 - y2*y2; const float p0 = t0*t0*t0*t0 * dot_2d(grad0_x, grad0_y, x0, y0); const float p1 = t1*t1*t1*t1 * dot_2d(grad1_x, grad1_y, x1, y1); const float p2 = t2*t2*t2*t2 * dot_2d(grad2_x, grad2_y, x2, y2); const float n0 = t0<0 ? 0 : p0; const float n1 = t1<0 ? 0 : p1; const float n2 = t2<0 ? 0 : p2; // Add contributions from each corner and scale to [-1,1] interval. return 70.0f * ( n0 + n1 + n2 ); } // Do 4 octaves of open simplex noise in 2D. __device__ float osino_2d_4o( float x, float y ) { const float n0 = osino_2d( x, y); const float n1 = osino_2d(2*x, 2*y); const float n2 = osino_2d(4*x, 4*y); const float n3 = osino_2d(8*x, 8*y); return (1/1.875f) * ( n0 + 0.5f * n1 + 0.25f * n2 + 0.125f * n3 ); } extern "C" { __global__ void doubledomainwarp ( value_t* field, float offset_x, float offset_y, float domainwarp0, float domainwarp1, float freq ) { const int xc = threadIdx.x; const int yc = blockIdx.x & IMMSK; const float ifull = 1.0f / IMRES; const float s0 = 2.017f * ifull; const float s1 = 2.053f * ifull; float x = xc * s0; float y = yc * s1; const float w0x = osino_2d(offset_x+411+y, offset_y+423-x) * domainwarp1; const float w0y = osino_2d(offset_x+419-y, offset_y+413+x) * domainwarp1; const float w1x = osino_2d(offset_x+711-w0x, offset_y+723-w0y) * domainwarp0; const float w1y = osino_2d(offset_x-719+w0x, offset_y+713+w0y) * domainwarp0; x += w1x; y += w1y; const int idx = (yc * (IMRES)) + xc; float result = osino_2d_4o(offset_x+freq*x, offset_y+freq*y); result = result < -1 ? -1 : result; result = result > 1 ? 
1 : result; field[ idx ] = (value_t) ( result * 32767.0f ); } }// extern C __host__ void query(void) { int nDevices=-1; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); int maxthr=-1; cudaDeviceGetAttribute(&maxthr, cudaDevAttrMaxThreadsPerBlock, i); int wrpsiz=-1; cudaDeviceGetAttribute(&wrpsiz, cudaDevAttrWarpSize, i); fprintf(stderr, "Device Number: %d\n", i); fprintf(stderr, " Device name: %s\n", prop.name); fprintf(stderr, " Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); fprintf(stderr, " Memory Bus Width (bits): %d\n", prop.memoryBusWidth); fprintf(stderr, " Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); fprintf(stderr, " Max threads per block: %d\n", maxthr); fprintf(stderr, " Warp size: %d\n", wrpsiz); } } #define CHECK_CUDA \ { \ const cudaError_t err = cudaGetLastError(); \ fprintf(stderr,"%s\n", cudaGetErrorString(err)); \ } __host__ int main(int argc, char* argv[]) { float opt_freq = 1.092f; float opt_warp0 = 0.1f; float opt_warp1 = 0.1f; const char* opt_out="out_doubledomainwarp.ppm"; for ( int i=1; i<argc; ++i) { if (!strncmp(argv[i],"freq=",5)) opt_freq = atof(argv[i]+5); if (!strncmp(argv[i],"warp0=",6)) opt_warp0 = atof(argv[i]+6); if (!strncmp(argv[i],"warp1=",6)) opt_warp1 = atof(argv[i]+6); if (!strncmp(argv[i],"out=",4)) opt_out = argv[i]+4; } query(); const int N = IMRES*IMRES; value_t* field = 0; cudaMallocManaged(&field, N*sizeof(value_t)); assert(field); CHECK_CUDA doubledomainwarp<<<IMRES,IMRES>>>(field, 14567.89f, 21123.46f, opt_warp0, opt_warp1, opt_freq ); fprintf( stderr,"warp0 %f warp1 %f", opt_warp0, opt_warp1 ); cudaDeviceSynchronize(); CHECK_CUDA unsigned char im[IMRES*IMRES][3]; for ( int i=0; i<IMRES*IMRES; ++i ) { int idx = 0; const value_t v = field[i]; if ( v> 2000 && v< 4000 ) idx=1; if ( v> 5000 && v< 9000 ) idx=2; if ( v> 10000 && v<12000 ) idx=1; if ( v<-16000 ) idx=3; im[i][0] = pal[idx][0]; 
im[i][1] = pal[idx][1]; im[i][2] = pal[idx][2]; } FILE* f = fopen(opt_out,"wb"); fprintf(f, "P6\n%d %d\n255\n", IMRES, IMRES); fwrite( im, sizeof(im), 1, f ); fclose(f); cudaFree(field); return 0; }
ae75a9a61873125a0b13dfe98abb235e479845f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* File: cuda_kmeans.cu (CUDA version) */ /* Description: Implementation of simple k-means clustering algorithm */ /* This program takes an array of N data objects, each with */ /* M coordinates and performs a k-means clustering given a */ /* user-provided value of the number of clusters (K). The */ /* clustering results are saved in 2 arrays: */ /* 1. a returned array of size [K][N] indicating the center */ /* coordinates of K clusters */ /* 2. membership[N] stores the cluster center ids, each */ /* corresponding to the cluster a data object is assigned */ /* */ /* Author: Wei-keng Liao */ /* ECE Department, Northwestern University */ /* email: wkliao@ece.northwestern.edu */ /* Copyright, 2005, Wei-keng Liao */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // Copyright (c) 2005 Wei-keng Liao // Copyright (c) 2011 Serban Giuroiu // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include "kmeans.h" static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } /*----< euclid_dist_2() >----------------------------------------------------*/ /* square of Euclid distance between two multi-dimensional points */ __host__ __device__ inline static float euclid_dist_2(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *clusters, // [numCoords][numClusters] int objectId, int clusterId) { int i; float ans=0.0; for (i = 0; i < numCoords; i++) { /*ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) * (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);*/ ans += (objects[objectId * numCoords + i] - clusters[clusterId * numCoords + i]) * (objects[objectId * numCoords + i] - clusters[clusterId * numCoords + i]); } return(ans); } /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. 
unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory + blockDim.x); membershipChanged[threadIdx.x] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! for (int i = threadIdx.x; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[numClusters * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, 0); for (i=1; i<numClusters; i++) { dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to // numIntermediates2, the number of threads launched. It *must* be a power // of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (blockIdx.x*blockDim.x + threadIdx.x < numIntermediates) ? 
deviceIntermediates[blockIdx.x*blockDim.x + threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if ((threadIdx.x == 0) && (blockIdx.x == 0)) { deviceIntermediates[0] = intermediates[0]; } else if (threadIdx.x == 0) { deviceIntermediates[0] += intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ //float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ //float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout /*malloc2D(dimObjects, numCoords, numObjs, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } }*/ /* pick first numClusters elements of objects[] as initial cluster centers*/ /*malloc2D(dimClusters, numCoords, numClusters, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } }*/ malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i ++) { for (j = 0; j < numCoords; j ++) { clusters[i][j] = objects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*) calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); // malloc2D(newClusters, numCoords, numClusters, float); // memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); malloc2D(newClusters, numClusters, numCoords, float); memset(newClusters[0], 0, numClusters * numCoords * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 256; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char) + numClusters * numCoords * sizeof(float); unsigned int orgReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int numReductionBlocks = orgReductionThreads/512 + 1; const unsigned int numReductionThreads = orgReductionThreads>512?512:orgReductionThreads; const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); checkCuda(hipMalloc(&deviceObjects, numObjs*numCoords*sizeof(float))); checkCuda(hipMalloc(&deviceClusters, numClusters*numCoords*sizeof(float))); checkCuda(hipMalloc(&deviceMembership, numObjs*sizeof(int))); checkCuda(hipMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))); /*checkCuda(hipMemcpy(deviceObjects, dimObjects[0], numObjs*numCoords*sizeof(float), hipMemcpyHostToDevice));*/ checkCuda(hipMemcpy(deviceObjects, objects[0], numObjs*numCoords*sizeof(float), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(deviceMembership, membership, numObjs*sizeof(int), hipMemcpyHostToDevice)); do { /*checkCuda(hipMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice));*/ checkCuda(hipMemcpy(deviceClusters, clusters[0], numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( find_nearest_cluster) , dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize , 0, numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); hipDeviceSynchronize(); checkLastCudaError(); hipLaunchKernelGGL(( compute_delta) , dim3(numReductionBlocks), dim3(numReductionThreads), reductionBlockSharedDataSize , 0, deviceIntermediates, numClusterBlocks, numReductionThreads); hipDeviceSynchronize(); checkLastCudaError(); int d; 
checkCuda(hipMemcpy(&d, deviceIntermediates, sizeof(int), hipMemcpyDeviceToHost)); delta = (float)d; checkCuda(hipMemcpy(membership, deviceMembership, numObjs*sizeof(int), hipMemcpyDeviceToHost)); for (i=0; i<numObjs; i++) { /* find the array index of nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j=0; j<numCoords; j++) newClusters[index][j] += objects[i][j]; } // TODO: Flip the nesting order // TODO: Change layout of newClusters to [numClusters][numCoords] /* average the sum and replace old cluster centers with newClusters */ for (i=0; i<numClusters; i++) { for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) clusters[i][j] = newClusters[i][j] / newClusterSize[i]; newClusters[i][j] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ /*malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; } }*/ checkCuda(hipFree(deviceObjects)); checkCuda(hipFree(deviceClusters)); checkCuda(hipFree(deviceMembership)); checkCuda(hipFree(deviceIntermediates)); //free(dimObjects[0]); //free(dimObjects); //free(dimClusters[0]); //free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
ae75a9a61873125a0b13dfe98abb235e479845f1.cu
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* File: cuda_kmeans.cu (CUDA version) */ /* Description: Implementation of simple k-means clustering algorithm */ /* This program takes an array of N data objects, each with */ /* M coordinates and performs a k-means clustering given a */ /* user-provided value of the number of clusters (K). The */ /* clustering results are saved in 2 arrays: */ /* 1. a returned array of size [K][N] indicating the center */ /* coordinates of K clusters */ /* 2. membership[N] stores the cluster center ids, each */ /* corresponding to the cluster a data object is assigned */ /* */ /* Author: Wei-keng Liao */ /* ECE Department, Northwestern University */ /* email: wkliao@ece.northwestern.edu */ /* Copyright, 2005, Wei-keng Liao */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // Copyright (c) 2005 Wei-keng Liao // Copyright (c) 2011 Serban Giuroiu // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include "kmeans.h" static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } /*----< euclid_dist_2() >----------------------------------------------------*/ /* square of Euclid distance between two multi-dimensional points */ __host__ __device__ inline static float euclid_dist_2(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *clusters, // [numCoords][numClusters] int objectId, int clusterId) { int i; float ans=0.0; for (i = 0; i < numCoords; i++) { /*ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) * (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);*/ ans += (objects[objectId * numCoords + i] - clusters[clusterId * numCoords + i]) * (objects[objectId * numCoords + i] - clusters[clusterId * numCoords + i]); } return(ans); } /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. 
unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory + blockDim.x); membershipChanged[threadIdx.x] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! for (int i = threadIdx.x; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[numClusters * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, 0); for (i=1; i<numClusters; i++) { dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to // numIntermediates2, the number of threads launched. It *must* be a power // of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (blockIdx.x*blockDim.x + threadIdx.x < numIntermediates) ? 
deviceIntermediates[blockIdx.x*blockDim.x + threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if ((threadIdx.x == 0) && (blockIdx.x == 0)) { deviceIntermediates[0] = intermediates[0]; } else if (threadIdx.x == 0) { deviceIntermediates[0] += intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ //float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ //float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout /*malloc2D(dimObjects, numCoords, numObjs, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } }*/ /* pick first numClusters elements of objects[] as initial cluster centers*/ /*malloc2D(dimClusters, numCoords, numClusters, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } }*/ malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i ++) { for (j = 0; j < numCoords; j ++) { clusters[i][j] = objects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*) calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); // malloc2D(newClusters, numCoords, numClusters, float); // memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); malloc2D(newClusters, numClusters, numCoords, float); memset(newClusters[0], 0, numClusters * numCoords * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 256; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char) + numClusters * numCoords * sizeof(float); unsigned int orgReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int numReductionBlocks = orgReductionThreads/512 + 1; const unsigned int numReductionThreads = orgReductionThreads>512?512:orgReductionThreads; const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); checkCuda(cudaMalloc(&deviceObjects, numObjs*numCoords*sizeof(float))); checkCuda(cudaMalloc(&deviceClusters, numClusters*numCoords*sizeof(float))); checkCuda(cudaMalloc(&deviceMembership, numObjs*sizeof(int))); checkCuda(cudaMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))); /*checkCuda(cudaMemcpy(deviceObjects, dimObjects[0], numObjs*numCoords*sizeof(float), cudaMemcpyHostToDevice));*/ checkCuda(cudaMemcpy(deviceObjects, objects[0], numObjs*numCoords*sizeof(float), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(deviceMembership, membership, numObjs*sizeof(int), cudaMemcpyHostToDevice)); do { /*checkCuda(cudaMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice));*/ checkCuda(cudaMemcpy(deviceClusters, clusters[0], numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice)); find_nearest_cluster <<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >>> (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); cudaThreadSynchronize(); checkLastCudaError(); compute_delta <<< numReductionBlocks, numReductionThreads, reductionBlockSharedDataSize >>> (deviceIntermediates, numClusterBlocks, numReductionThreads); cudaThreadSynchronize(); checkLastCudaError(); int d; checkCuda(cudaMemcpy(&d, deviceIntermediates, sizeof(int), 
cudaMemcpyDeviceToHost)); delta = (float)d; checkCuda(cudaMemcpy(membership, deviceMembership, numObjs*sizeof(int), cudaMemcpyDeviceToHost)); for (i=0; i<numObjs; i++) { /* find the array index of nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j=0; j<numCoords; j++) newClusters[index][j] += objects[i][j]; } // TODO: Flip the nesting order // TODO: Change layout of newClusters to [numClusters][numCoords] /* average the sum and replace old cluster centers with newClusters */ for (i=0; i<numClusters; i++) { for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) clusters[i][j] = newClusters[i][j] / newClusterSize[i]; newClusters[i][j] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ /*malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; } }*/ checkCuda(cudaFree(deviceObjects)); checkCuda(cudaFree(deviceClusters)); checkCuda(cudaFree(deviceMembership)); checkCuda(cudaFree(deviceIntermediates)); //free(dimObjects[0]); //free(dimObjects); //free(dimClusters[0]); //free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
3542623f8ac5ebd22e1664dba217abb3938c3b82.hip
// !!! This is a file automatically generated by hipify!!! /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "pulsed_weight_updater.h" #include <hipcub/hipcub.hpp> #include <algorithm> #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "io_iterator.h" #include "pwu_kernel_parameter.h" #include "rpucuda_pulsed_device.h" namespace RPU { /****************************************************************************************************************/ /* PULSEDWEIGHTUPDATER */ /******************************************************************************************************************/ template <typename T> PulsedWeightUpdater<T>::PulsedWeightUpdater(CudaContext *c, int x_size, int d_size) : context_{c}, x_size_{x_size}, d_size_{d_size} { blm_ = RPU::make_unique<BitLineMaker<T>>(c, x_size, d_size); up_context_ = nullptr; is_async_update_ = false; }; template <typename T> pwukpvec_t<T> PulsedWeightUpdater<T>::getValidUpdateKernels( PulsedRPUDeviceCudaBase<T> *rpucuda_device, int m_batch, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; for (int use_bo64 : {1, 0}) { // omit 2 (ie bo64 translation) for (int out_trans : {true, false}) { pwukpvec_t<T> v2 = rpucuda_device->getUpdateKernels(m_batch, up.getNK32Default(), use_bo64, out_trans, up); for (int i = 0; i < v2.size(); i++) { if (v2[i]->isValid()) { v.push_back(v2[i]); } } } if (v.size() > 0 && (m_batch >= 1000)) { break; // prefer bo64 for large batch if possible } } return v; } template <typename 
T> void PulsedWeightUpdater<T>::makeUpdateAsync() { if (!is_async_update_) { is_async_update_ = true; up_context_ = RPU::make_unique<CudaContext>(context_->getGPUId()); } } template <typename T> void PulsedWeightUpdater<T>::waitForUpdateCalculations() { if (is_async_update_) { // use the up_context event for it because context_ might be shared context_->recordWaitEvent(up_context_->getStream(), up_context_->getEvent()); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::executeUpdate( pwukp_t<T> kpars, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { blm_->makeCounts( x_in, d_in, up, rpucuda_device->getDwMin(), lr, m_batch, x_trans_in, d_trans_in, kpars->getOutTrans(), kpars->getUseBo64(), kpars->getImplicitPulses()); CudaContext *c = context_; if (is_async_update_) { up_context_->recordWaitEvent(context_->getStream(), context_->getEvent()); c = &*up_context_; } rpucuda_device->runUpdateKernel( kpars, c, dev_weights, m_batch, &*blm_, up, c->getRandomStates(kpars->getNStates())); } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::tuneUpdate( pwukp_t<T> &opt_kernel_pars, pwukpvec_t<T> &v, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { bool is_async_update = is_async_update_; is_async_update_ = false; CUDA_TIMING_INIT; int nrepeats = 3; CudaArray<T> dev_tmp_weights(context_, x_size_ * d_size_); auto *tmp_device = rpucuda_device->clone(); PulsedUpdateMetaParameter<T> up_tuning(up); up_tuning._currently_tuning = true; dev_tmp_weights.assignFromDevice(dev_weights); context_->synchronizeDevice(); // 
maybe other streams exist. T min_timing = FLT_MAX; int min_i = 0; for (int k = 0; k < v.size(); k++) { CUDA_TIMING_START(*context_); for (int i = 0; i < nrepeats; i++) { this->executeUpdate( v[k], x_in, d_in, dev_tmp_weights.getData(), tmp_device, up_tuning, lr, m_batch, x_trans_in, d_trans_in); } CUDA_TIMING_STOP_NO_OUTPUT(*context_); v[k]->timing = milliseconds / nrepeats; if (v[k]->timing < min_timing) { min_timing = v[k]->timing; min_i = k; } } CUDA_TIMING_DESTROY; is_async_update_ = is_async_update; opt_kernel_pars = v[min_i]; delete tmp_device; DEBUG_OUT( "UpdateTuner: Using " << opt_kernel_pars->getName() << " for PWU [" << opt_kernel_pars->timing << "].\n"); DEBUG_CALL(opt_kernel_pars->print()); } template <typename T> template <typename InputIteratorT> const T *PulsedWeightUpdater<T>::copyIterator2Buffer( InputIteratorT vec, std::shared_ptr<CudaArray<T>> &buffer, int size) { if ((buffer == nullptr) || (buffer->getSize() < size)) { buffer = std::shared_ptr<CudaArray<T>>(new CudaArray<T>(context_, size)); } RPU::math::copyWithIterator(context_, buffer->getData(), vec, size); return buffer->getDataConst(); } template <> template <> const float *PulsedWeightUpdater<float>::copyIterator2Buffer( const float *vec, std::shared_ptr<CudaArray<float>> &buffer, int size) { return vec; } #ifdef RPU_USE_DOUBLE template <> template <> const double *PulsedWeightUpdater<double>::copyIterator2Buffer( const double *vec, std::shared_ptr<CudaArray<double>> &buffer, int size) { return vec; } #endif template <typename T> void PulsedWeightUpdater<T>::setSharedBuffer( int m_batch, std::shared_ptr<CudaArray<T>> x_buffer, std::shared_ptr<CudaArray<T>> d_buffer) { if (x_buffer) { dev_fpx_buffer_ = x_buffer; if (dev_fpx_buffer_->getSize() < m_batch * x_size_) { RPU_FATAL("X batch buffer size too small."); } } if (d_buffer) { dev_fpd_buffer_ = d_buffer; if (dev_fpd_buffer_->getSize() < m_batch * d_size_) { RPU_FATAL("D batch buffer size too small."); } } } template <typename T> 
template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doFPupdate( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (m_batch == 1 && beta == 1.0) { RPU::math::ger<T>(context_, d_size_, x_size_, -lr, d_out, 1, x_out, 1, dev_weights, d_size_); } else { RPU::math::gemm<T>( context_, d_trans, !x_trans, d_size_, // M x_size_, // N m_batch, // K -lr, d_out, d_trans ? m_batch : d_size_, x_out, x_trans ? m_batch : x_size_, beta, dev_weights, d_size_); } } template <typename T> void PulsedWeightUpdater<T>::checkBuffers(int m_batch) { // make sure shared buffers are constructed if ((dev_fpx_buffer_ == nullptr) || (dev_fpx_buffer_->getSize() < x_size_ * m_batch)) { dev_fpx_buffer_ = std::make_shared<CudaArray<T>>(context_, x_size_ * m_batch); } if ((dev_fpd_buffer_ == nullptr) || (dev_fpd_buffer_->getSize() < d_size_ * m_batch)) { dev_fpd_buffer_ = std::make_shared<CudaArray<T>>(context_, d_size_ * m_batch); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doDirectUpdate( XInputIteratorT x_in, DInputIteratorT d_in, AbstractRPUDeviceCuda<T> *rpucuda_device, T *dev_weights, const T lr, const PulsedUpdateMetaParameter<T> &up, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { checkBuffers(m_batch); // make sure they are created (we need them also for float * iterator) const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (!rpucuda_device->hasDirectUpdate()) { RPU_FATAL("Device does not support a direct update"); } rpucuda_device->doDirectUpdate( x_out, d_out, dev_weights, lr, m_batch, 
x_trans, d_trans, beta, up, dev_fpx_buffer_->getData(), // this might overrite x_out dev_fpd_buffer_->getData()); } template <typename T> bool PulsedWeightUpdater<T>::checkForFPUpdate( AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up) { return (up.pulse_type == PulseType::None) || (rpucuda_device_in == nullptr) || !rpucuda_device_in->isPulsedDevice() || (rpucuda_device_in->implements() == DeviceUpdateType::FloatingPoint); } #define FORCE_TUNING_THRES 0 template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::update( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans, const bool d_trans) { // FP update if no device is given if (rpucuda_device_in != nullptr && rpucuda_device_in->hasDirectUpdate()) { doDirectUpdate(x_in, d_in, rpucuda_device_in, dev_weights, lr, up, m_batch, x_trans, d_trans); return; } else if ( checkForFPUpdate(rpucuda_device_in, up) || (up.pulse_type == PulseType::NoneWithDevice)) { doFPupdate(x_in, d_in, dev_weights, lr, m_batch, x_trans, d_trans); if (up.pulse_type == PulseType::NoneWithDevice) { // apply bounds rpucuda_device_in->clipWeights(dev_weights, -1.0); } return; } // safe because of isPulsedDevice PulsedRPUDeviceCudaBase<T> *rpucuda_device = static_cast<PulsedRPUDeviceCudaBase<T> *>(rpucuda_device_in); bool force_tuning = false; // check need for init (or re-init) DeviceUpdateType update_type = rpucuda_device->implements(); if (update_type != update_type_) //|| (!blm_->checkBuffer(m_batch,BL))) { // we do not check for change in x_size/d_size, but they are assumed to be constant as well! 
force_tuning = true; update_type_ = update_type; update_count_ = 0; // init kernels valid_kernels_ = getValidUpdateKernels(rpucuda_device, m_batch, up); if (valid_kernels_.size() == 0) { RPU_FATAL("Cannot find valid update kernels"); } kernel_pars_ = valid_kernels_[0]; // this will be modified if tuned if (up._debug_kernel_index >= 0) { // set default for debugging // just get a valid kpars (will be overwritten if tuning is used below) force_tuning = false; int kidx = up._debug_kernel_index; if (up._debug_kernel_index >= valid_kernels_.size()) { std::cout << "DEBUG WARNING: kernel index out of range " << valid_kernels_.size() << std::endl; kidx = 0; } kernel_pars_ = valid_kernels_[kidx]; if (kernel_pars_->getUseBo64() == 1) { std::cout << "DEBUG WARNING: cannot test BO64 direct. Set to translate " << std::endl; kernel_pars_->forceBo64Translate(); } if (kidx == 0) { kernel_pars_->force32(); // debug hack: might break kernel in the worst case kernel_pars_->forceNonTrans(); // debug hack: might break kernel in the worst case std::cout << "DEBUG WARNING: Kernel index 0: FORCED 32 and non-trans" << std::endl; } std::cout << "Selected kernel index " << kidx << " out of " << valid_kernels_.size() << std::endl; kernel_pars_->print(); } } if (update_count_ < FORCE_TUNING_THRES) { // only once again update_count_ += 1; force_tuning = force_tuning || (update_count_ == FORCE_TUNING_THRES); } // tune if requested if (force_tuning) { this->tuneUpdate( kernel_pars_, valid_kernels_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } // do update this->executeUpdate( kernel_pars_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } #define RPU_PWU_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \ template void PulsedWeightUpdater<NUM_T>::update( \ XITERT, DITERT, NUM_T *, AbstractRPUDeviceCuda<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); \ template void 
PulsedWeightUpdater<NUM_T>::doFPupdate( \ XITERT, DITERT, NUM_T *, const NUM_T, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::doDirectUpdate( \ XITERT, DITERT, AbstractRPUDeviceCuda<NUM_T> *, NUM_T *, const NUM_T, \ const PulsedUpdateMetaParameter<NUM_T> &, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::tuneUpdate( \ pwukp_t<NUM_T> &, pwukpvec_t<NUM_T> &, XITERT, DITERT, NUM_T *, \ PulsedRPUDeviceCudaBase<NUM_T> *, const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, \ const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::executeUpdate( \ pwukp_t<NUM_T>, XITERT, DITERT, NUM_T *, PulsedRPUDeviceCudaBase<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); #define TRANSFLOAT(TRANS) TRANS, float template class PulsedWeightUpdater<float>; RPU_PWU_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, const float *, const float *); RPU_PWU_ITER_TEMPLATE( float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>); RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *); #undef TRANSFLOAT #ifdef RPU_USE_DOUBLE #define TRANSDOUBLE(TRANS) TRANS, double 
template class PulsedWeightUpdater<double>; RPU_PWU_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, const double *, const double *); RPU_PWU_ITER_TEMPLATE( double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *); #undef TRANSDOUBLE #endif #undef RPU_PWU_ITER_TEMPLATE } // namespace RPU
3542623f8ac5ebd22e1664dba217abb3938c3b82.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "pulsed_weight_updater.h" #include <cub/cub.cuh> #include <algorithm> #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "io_iterator.h" #include "pwu_kernel_parameter.h" #include "rpucuda_pulsed_device.h" namespace RPU { /****************************************************************************************************************/ /* PULSEDWEIGHTUPDATER */ /******************************************************************************************************************/ template <typename T> PulsedWeightUpdater<T>::PulsedWeightUpdater(CudaContext *c, int x_size, int d_size) : context_{c}, x_size_{x_size}, d_size_{d_size} { blm_ = RPU::make_unique<BitLineMaker<T>>(c, x_size, d_size); up_context_ = nullptr; is_async_update_ = false; }; template <typename T> pwukpvec_t<T> PulsedWeightUpdater<T>::getValidUpdateKernels( PulsedRPUDeviceCudaBase<T> *rpucuda_device, int m_batch, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; for (int use_bo64 : {1, 0}) { // omit 2 (ie bo64 translation) for (int out_trans : {true, false}) { pwukpvec_t<T> v2 = rpucuda_device->getUpdateKernels(m_batch, up.getNK32Default(), use_bo64, out_trans, up); for (int i = 0; i < v2.size(); i++) { if (v2[i]->isValid()) { v.push_back(v2[i]); } } } if (v.size() > 0 && (m_batch >= 1000)) { break; // prefer bo64 for large batch if possible } } return v; } template <typename T> void PulsedWeightUpdater<T>::makeUpdateAsync() { if 
(!is_async_update_) { is_async_update_ = true; up_context_ = RPU::make_unique<CudaContext>(context_->getGPUId()); } } template <typename T> void PulsedWeightUpdater<T>::waitForUpdateCalculations() { if (is_async_update_) { // use the up_context event for it because context_ might be shared context_->recordWaitEvent(up_context_->getStream(), up_context_->getEvent()); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::executeUpdate( pwukp_t<T> kpars, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { blm_->makeCounts( x_in, d_in, up, rpucuda_device->getDwMin(), lr, m_batch, x_trans_in, d_trans_in, kpars->getOutTrans(), kpars->getUseBo64(), kpars->getImplicitPulses()); CudaContext *c = context_; if (is_async_update_) { up_context_->recordWaitEvent(context_->getStream(), context_->getEvent()); c = &*up_context_; } rpucuda_device->runUpdateKernel( kpars, c, dev_weights, m_batch, &*blm_, up, c->getRandomStates(kpars->getNStates())); } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::tuneUpdate( pwukp_t<T> &opt_kernel_pars, pwukpvec_t<T> &v, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { bool is_async_update = is_async_update_; is_async_update_ = false; CUDA_TIMING_INIT; int nrepeats = 3; CudaArray<T> dev_tmp_weights(context_, x_size_ * d_size_); auto *tmp_device = rpucuda_device->clone(); PulsedUpdateMetaParameter<T> up_tuning(up); up_tuning._currently_tuning = true; dev_tmp_weights.assignFromDevice(dev_weights); context_->synchronizeDevice(); // maybe other streams exist. 
T min_timing = FLT_MAX; int min_i = 0; for (int k = 0; k < v.size(); k++) { CUDA_TIMING_START(*context_); for (int i = 0; i < nrepeats; i++) { this->executeUpdate( v[k], x_in, d_in, dev_tmp_weights.getData(), tmp_device, up_tuning, lr, m_batch, x_trans_in, d_trans_in); } CUDA_TIMING_STOP_NO_OUTPUT(*context_); v[k]->timing = milliseconds / nrepeats; if (v[k]->timing < min_timing) { min_timing = v[k]->timing; min_i = k; } } CUDA_TIMING_DESTROY; is_async_update_ = is_async_update; opt_kernel_pars = v[min_i]; delete tmp_device; DEBUG_OUT( "UpdateTuner: Using " << opt_kernel_pars->getName() << " for PWU [" << opt_kernel_pars->timing << "].\n"); DEBUG_CALL(opt_kernel_pars->print()); } template <typename T> template <typename InputIteratorT> const T *PulsedWeightUpdater<T>::copyIterator2Buffer( InputIteratorT vec, std::shared_ptr<CudaArray<T>> &buffer, int size) { if ((buffer == nullptr) || (buffer->getSize() < size)) { buffer = std::shared_ptr<CudaArray<T>>(new CudaArray<T>(context_, size)); } RPU::math::copyWithIterator(context_, buffer->getData(), vec, size); return buffer->getDataConst(); } template <> template <> const float *PulsedWeightUpdater<float>::copyIterator2Buffer( const float *vec, std::shared_ptr<CudaArray<float>> &buffer, int size) { return vec; } #ifdef RPU_USE_DOUBLE template <> template <> const double *PulsedWeightUpdater<double>::copyIterator2Buffer( const double *vec, std::shared_ptr<CudaArray<double>> &buffer, int size) { return vec; } #endif template <typename T> void PulsedWeightUpdater<T>::setSharedBuffer( int m_batch, std::shared_ptr<CudaArray<T>> x_buffer, std::shared_ptr<CudaArray<T>> d_buffer) { if (x_buffer) { dev_fpx_buffer_ = x_buffer; if (dev_fpx_buffer_->getSize() < m_batch * x_size_) { RPU_FATAL("X batch buffer size too small."); } } if (d_buffer) { dev_fpd_buffer_ = d_buffer; if (dev_fpd_buffer_->getSize() < m_batch * d_size_) { RPU_FATAL("D batch buffer size too small."); } } } template <typename T> template <typename 
XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doFPupdate( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (m_batch == 1 && beta == 1.0) { RPU::math::ger<T>(context_, d_size_, x_size_, -lr, d_out, 1, x_out, 1, dev_weights, d_size_); } else { RPU::math::gemm<T>( context_, d_trans, !x_trans, d_size_, // M x_size_, // N m_batch, // K -lr, d_out, d_trans ? m_batch : d_size_, x_out, x_trans ? m_batch : x_size_, beta, dev_weights, d_size_); } } template <typename T> void PulsedWeightUpdater<T>::checkBuffers(int m_batch) { // make sure shared buffers are constructed if ((dev_fpx_buffer_ == nullptr) || (dev_fpx_buffer_->getSize() < x_size_ * m_batch)) { dev_fpx_buffer_ = std::make_shared<CudaArray<T>>(context_, x_size_ * m_batch); } if ((dev_fpd_buffer_ == nullptr) || (dev_fpd_buffer_->getSize() < d_size_ * m_batch)) { dev_fpd_buffer_ = std::make_shared<CudaArray<T>>(context_, d_size_ * m_batch); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doDirectUpdate( XInputIteratorT x_in, DInputIteratorT d_in, AbstractRPUDeviceCuda<T> *rpucuda_device, T *dev_weights, const T lr, const PulsedUpdateMetaParameter<T> &up, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { checkBuffers(m_batch); // make sure they are created (we need them also for float * iterator) const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (!rpucuda_device->hasDirectUpdate()) { RPU_FATAL("Device does not support a direct update"); } rpucuda_device->doDirectUpdate( x_out, d_out, dev_weights, lr, m_batch, x_trans, d_trans, beta, 
up, dev_fpx_buffer_->getData(), // this might overrite x_out dev_fpd_buffer_->getData()); } template <typename T> bool PulsedWeightUpdater<T>::checkForFPUpdate( AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up) { return (up.pulse_type == PulseType::None) || (rpucuda_device_in == nullptr) || !rpucuda_device_in->isPulsedDevice() || (rpucuda_device_in->implements() == DeviceUpdateType::FloatingPoint); } #define FORCE_TUNING_THRES 0 template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::update( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans, const bool d_trans) { // FP update if no device is given if (rpucuda_device_in != nullptr && rpucuda_device_in->hasDirectUpdate()) { doDirectUpdate(x_in, d_in, rpucuda_device_in, dev_weights, lr, up, m_batch, x_trans, d_trans); return; } else if ( checkForFPUpdate(rpucuda_device_in, up) || (up.pulse_type == PulseType::NoneWithDevice)) { doFPupdate(x_in, d_in, dev_weights, lr, m_batch, x_trans, d_trans); if (up.pulse_type == PulseType::NoneWithDevice) { // apply bounds rpucuda_device_in->clipWeights(dev_weights, -1.0); } return; } // safe because of isPulsedDevice PulsedRPUDeviceCudaBase<T> *rpucuda_device = static_cast<PulsedRPUDeviceCudaBase<T> *>(rpucuda_device_in); bool force_tuning = false; // check need for init (or re-init) DeviceUpdateType update_type = rpucuda_device->implements(); if (update_type != update_type_) //|| (!blm_->checkBuffer(m_batch,BL))) { // we do not check for change in x_size/d_size, but they are assumed to be constant as well! 
force_tuning = true; update_type_ = update_type; update_count_ = 0; // init kernels valid_kernels_ = getValidUpdateKernels(rpucuda_device, m_batch, up); if (valid_kernels_.size() == 0) { RPU_FATAL("Cannot find valid update kernels"); } kernel_pars_ = valid_kernels_[0]; // this will be modified if tuned if (up._debug_kernel_index >= 0) { // set default for debugging // just get a valid kpars (will be overwritten if tuning is used below) force_tuning = false; int kidx = up._debug_kernel_index; if (up._debug_kernel_index >= valid_kernels_.size()) { std::cout << "DEBUG WARNING: kernel index out of range " << valid_kernels_.size() << std::endl; kidx = 0; } kernel_pars_ = valid_kernels_[kidx]; if (kernel_pars_->getUseBo64() == 1) { std::cout << "DEBUG WARNING: cannot test BO64 direct. Set to translate " << std::endl; kernel_pars_->forceBo64Translate(); } if (kidx == 0) { kernel_pars_->force32(); // debug hack: might break kernel in the worst case kernel_pars_->forceNonTrans(); // debug hack: might break kernel in the worst case std::cout << "DEBUG WARNING: Kernel index 0: FORCED 32 and non-trans" << std::endl; } std::cout << "Selected kernel index " << kidx << " out of " << valid_kernels_.size() << std::endl; kernel_pars_->print(); } } if (update_count_ < FORCE_TUNING_THRES) { // only once again update_count_ += 1; force_tuning = force_tuning || (update_count_ == FORCE_TUNING_THRES); } // tune if requested if (force_tuning) { this->tuneUpdate( kernel_pars_, valid_kernels_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } // do update this->executeUpdate( kernel_pars_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } #define RPU_PWU_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \ template void PulsedWeightUpdater<NUM_T>::update( \ XITERT, DITERT, NUM_T *, AbstractRPUDeviceCuda<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); \ template void 
PulsedWeightUpdater<NUM_T>::doFPupdate( \ XITERT, DITERT, NUM_T *, const NUM_T, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::doDirectUpdate( \ XITERT, DITERT, AbstractRPUDeviceCuda<NUM_T> *, NUM_T *, const NUM_T, \ const PulsedUpdateMetaParameter<NUM_T> &, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::tuneUpdate( \ pwukp_t<NUM_T> &, pwukpvec_t<NUM_T> &, XITERT, DITERT, NUM_T *, \ PulsedRPUDeviceCudaBase<NUM_T> *, const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, \ const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::executeUpdate( \ pwukp_t<NUM_T>, XITERT, DITERT, NUM_T *, PulsedRPUDeviceCudaBase<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); #define TRANSFLOAT(TRANS) TRANS, float template class PulsedWeightUpdater<float>; RPU_PWU_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, const float *, const float *); RPU_PWU_ITER_TEMPLATE( float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>); RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *); #undef TRANSFLOAT #ifdef RPU_USE_DOUBLE #define TRANSDOUBLE(TRANS) TRANS, double 
template class PulsedWeightUpdater<double>; RPU_PWU_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, const double *, const double *); RPU_PWU_ITER_TEMPLATE( double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *); #undef TRANSDOUBLE #endif #undef RPU_PWU_ITER_TEMPLATE } // namespace RPU
4058e3e12d5d3b0c9efa58b0c78f0d74afa2e4c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "IntegralImage.h" #include "bitmap_image.hpp" #include <cmath> const float IntegralImage::cR = (float).2989; const float IntegralImage::cG = (float).5870; const float IntegralImage::cB = (float).1140; float IntegralImage::getValue(int y, int x) { return this->Matrix[y][x]; } void IntegralImage::setValue(int y, int x, float value) { this->Matrix[y][x] = value; } IntegralImage::IntegralImage() { this->Width = 0; this->Height = 0; this->Matrix = NULL; } IntegralImage::IntegralImage(int width, int height) { this->Width = width; this->Height = height; hipMalloc((void **) &(this->Matrix), height*sizeof(float *)); float ** temp[height]; hipMemcpy(temp, this->Matrix, height*sizeof(float *), hipMemcpyDeviceToHost); for (int ii = 0; ii < height; ii++) hipMalloc((void **) &(temp[ii]), (width)*sizeof(float)); hipMemcpy(this->Matrix, temp, height*sizeof(float *), hipMemcpyHostToDevice); } IntegralImage::~IntegralImage() { if (Matrix != NULL) { float ** temp[this->Height]; hipMemcpy(temp, this->Matrix, (this->Height)*sizeof(float *), hipMemcpyDeviceToHost); for (int ii = 0; ii < this->Height; ii++) hipFree(temp[ii]); hipFree(this->Matrix); } } /* example kernel */ __global__ void kernelFromImageCols(unsigned char * d_image, float ** d_pic, int Width, int Height, unsigned int row_increment, unsigned int bytes_per_pixel) { float colsum = (float)(0.0); unsigned char red, green, blue; unsigned int row_increment_ = row_increment; unsigned int bytes_per_pixel_ = bytes_per_pixel; int width = Width; int height = Height; const float cR = (float).2989; const float cG = (float).5870; const float cB = (float).1140; unsigned int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= width) return; for (unsigned int y = 0; y < height; y++) { blue = d_image[(y * row_increment_) + (x * bytes_per_pixel_ + 0)]; green = d_image[(y * row_increment_) + (x * bytes_per_pixel_ + 1)]; red = d_image[(y * 
row_increment_) + (x * bytes_per_pixel_ + 2)]; colsum += (cR * red + cG * green + cB * blue) / (float)255; d_pic[y][x] = colsum; } } __global__ void kernelFromImageRows(float ** d_pic, int Width, int Height){ int width = Width; int height = Height; int y = threadIdx.x + blockDim.x*blockIdx.x; if (y >= height) return; float rowsum = d_pic[y][0]; for (unsigned int x = 1; x < width; x++) { rowsum += d_pic[y][x]; d_pic[y][x] = rowsum; } } IntegralImage * IntegralImage::FromImage(bitmap_image &h_image) { int ThreadsPerBlock = 256; int BlocksPerGrid; IntegralImage * h_pic = new IntegralImage(h_image.width(), h_image.height()); unsigned char * d_image; hipMalloc((void**) &d_image, (h_image.length_)*sizeof(unsigned char)); hipMemcpy(d_image, h_image.data_, (h_image.length_)*sizeof(unsigned char), hipMemcpyHostToDevice); BlocksPerGrid = ((h_pic->Width / ThreadsPerBlock) + 1); hipLaunchKernelGGL(( kernelFromImageCols) , dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, d_image, h_pic->Matrix, h_pic->Width, h_pic->Height, h_image.row_increment_, h_image.bytes_per_pixel_); hipDeviceSynchronize(); BlocksPerGrid = ((h_pic->Height / ThreadsPerBlock) + 1); hipLaunchKernelGGL(( kernelFromImageRows) , dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, h_pic->Matrix, h_pic->Width, h_pic->Height); hipDeviceSynchronize(); hipFree(d_image); return h_pic; } float IntegralImage::BoxIntegral(int row, int col, int rows, int cols) { // The subtraction by one for row/col is because row/col is inclusive. 
int r1 = ::min(row, Height) - 1; int c1 = ::min(col, Width) - 1; int r2 = ::min(row + rows, Height) - 1; int c2 = ::min(col + cols, Width) - 1; float A = 0, B = 0, C = 0, D = 0; if (r1 >= 0 && c1 >= 0) A = Matrix[r1][c1]; if (r1 >= 0 && c2 >= 0) B = Matrix[r1][c2]; if (r2 >= 0 && c1 >= 0) C = Matrix[r2][c1]; if (r2 >= 0 && c2 >= 0) D = Matrix[r2][c2]; return ::max((float)0, A - B - C + D); } // Get Haar Wavelet X repsonse float IntegralImage::HaarX(int row, int column, int size) { return BoxIntegral(row - size / 2, column, size, size / 2) - 1 * BoxIntegral(row - size / 2, column - size / 2, size, size / 2); } // Get Haar Wavelet Y repsonse float IntegralImage::HaarY(int row, int column, int size) { return BoxIntegral(row, column - size / 2, size / 2, size) - 1 * BoxIntegral(row - size / 2, column - size / 2, size / 2, size); }
4058e3e12d5d3b0c9efa58b0c78f0d74afa2e4c5.cu
#include "IntegralImage.h" #include "bitmap_image.hpp" #include <cmath> const float IntegralImage::cR = (float).2989; const float IntegralImage::cG = (float).5870; const float IntegralImage::cB = (float).1140; float IntegralImage::getValue(int y, int x) { return this->Matrix[y][x]; } void IntegralImage::setValue(int y, int x, float value) { this->Matrix[y][x] = value; } IntegralImage::IntegralImage() { this->Width = 0; this->Height = 0; this->Matrix = NULL; } IntegralImage::IntegralImage(int width, int height) { this->Width = width; this->Height = height; cudaMalloc((void **) &(this->Matrix), height*sizeof(float *)); float ** temp[height]; cudaMemcpy(temp, this->Matrix, height*sizeof(float *), cudaMemcpyDeviceToHost); for (int ii = 0; ii < height; ii++) cudaMalloc((void **) &(temp[ii]), (width)*sizeof(float)); cudaMemcpy(this->Matrix, temp, height*sizeof(float *), cudaMemcpyHostToDevice); } IntegralImage::~IntegralImage() { if (Matrix != NULL) { float ** temp[this->Height]; cudaMemcpy(temp, this->Matrix, (this->Height)*sizeof(float *), cudaMemcpyDeviceToHost); for (int ii = 0; ii < this->Height; ii++) cudaFree(temp[ii]); cudaFree(this->Matrix); } } /* example kernel */ __global__ void kernelFromImageCols(unsigned char * d_image, float ** d_pic, int Width, int Height, unsigned int row_increment, unsigned int bytes_per_pixel) { float colsum = (float)(0.0); unsigned char red, green, blue; unsigned int row_increment_ = row_increment; unsigned int bytes_per_pixel_ = bytes_per_pixel; int width = Width; int height = Height; const float cR = (float).2989; const float cG = (float).5870; const float cB = (float).1140; unsigned int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= width) return; for (unsigned int y = 0; y < height; y++) { blue = d_image[(y * row_increment_) + (x * bytes_per_pixel_ + 0)]; green = d_image[(y * row_increment_) + (x * bytes_per_pixel_ + 1)]; red = d_image[(y * row_increment_) + (x * bytes_per_pixel_ + 2)]; colsum += (cR * red + cG * green + cB * 
blue) / (float)255; d_pic[y][x] = colsum; } } __global__ void kernelFromImageRows(float ** d_pic, int Width, int Height){ int width = Width; int height = Height; int y = threadIdx.x + blockDim.x*blockIdx.x; if (y >= height) return; float rowsum = d_pic[y][0]; for (unsigned int x = 1; x < width; x++) { rowsum += d_pic[y][x]; d_pic[y][x] = rowsum; } } IntegralImage * IntegralImage::FromImage(bitmap_image &h_image) { int ThreadsPerBlock = 256; int BlocksPerGrid; IntegralImage * h_pic = new IntegralImage(h_image.width(), h_image.height()); unsigned char * d_image; cudaMalloc((void**) &d_image, (h_image.length_)*sizeof(unsigned char)); cudaMemcpy(d_image, h_image.data_, (h_image.length_)*sizeof(unsigned char), cudaMemcpyHostToDevice); BlocksPerGrid = ((h_pic->Width / ThreadsPerBlock) + 1); kernelFromImageCols <<< BlocksPerGrid, ThreadsPerBlock >>> (d_image, h_pic->Matrix, h_pic->Width, h_pic->Height, h_image.row_increment_, h_image.bytes_per_pixel_); cudaDeviceSynchronize(); BlocksPerGrid = ((h_pic->Height / ThreadsPerBlock) + 1); kernelFromImageRows <<< BlocksPerGrid, ThreadsPerBlock >>> (h_pic->Matrix, h_pic->Width, h_pic->Height); cudaDeviceSynchronize(); cudaFree(d_image); return h_pic; } float IntegralImage::BoxIntegral(int row, int col, int rows, int cols) { // The subtraction by one for row/col is because row/col is inclusive. 
int r1 = std::min(row, Height) - 1; int c1 = std::min(col, Width) - 1; int r2 = std::min(row + rows, Height) - 1; int c2 = std::min(col + cols, Width) - 1; float A = 0, B = 0, C = 0, D = 0; if (r1 >= 0 && c1 >= 0) A = Matrix[r1][c1]; if (r1 >= 0 && c2 >= 0) B = Matrix[r1][c2]; if (r2 >= 0 && c1 >= 0) C = Matrix[r2][c1]; if (r2 >= 0 && c2 >= 0) D = Matrix[r2][c2]; return std::max((float)0, A - B - C + D); } // Get Haar Wavelet X repsonse float IntegralImage::HaarX(int row, int column, int size) { return BoxIntegral(row - size / 2, column, size, size / 2) - 1 * BoxIntegral(row - size / 2, column - size / 2, size, size / 2); } // Get Haar Wavelet Y repsonse float IntegralImage::HaarY(int row, int column, int size) { return BoxIntegral(row, column - size / 2, size / 2, size) - 1 * BoxIntegral(row - size / 2, column - size / 2, size / 2, size); }
2806d4bfff6d64c1501b314c3f894755c65b16f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //////////////////////////////////////////////////////////////////////////////// // Global types //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <helper_cuda.h> #include <hiprand/hiprand_kernel.h> #include "MonteCarlo_common.h" //////////////////////////////////////////////////////////////////////////////// // Helper reduction template // Please see the "reduction" CUDA Sample for more information //////////////////////////////////////////////////////////////////////////////// #include "MonteCarlo_reduction.cuh" //////////////////////////////////////////////////////////////////////////////// // Internal GPU-side data structures //////////////////////////////////////////////////////////////////////////////// #define MAX_OPTIONS (1024*1024) //Preprocessed input option data typedef struct { real S; real X; real MuByT; real VBySqrtT; } __TOptionData; //////////////////////////////////////////////////////////////////////////////// // Overloaded shortcut payoff functions for different precision modes //////////////////////////////////////////////////////////////////////////////// __device__ inline float endCallValue(float S, float X, float r, float MuByT, float VBySqrtT) { float callValue = S * __expf(MuByT + VBySqrtT * r) - X; return (callValue > 0.0F) ? 
callValue : 0.0F; } __device__ inline double endCallValue(double S, double X, double r, double MuByT, double VBySqrtT) { double callValue = S * exp(MuByT + VBySqrtT * r) - X; return (callValue > 0.0) ? callValue : 0.0; } #define THREAD_N 256 //////////////////////////////////////////////////////////////////////////////// // This kernel computes the integral over all paths using a single thread block // per option. It is fastest when the number of thread blocks times the work per // block is high enough to keep the GPU busy. //////////////////////////////////////////////////////////////////////////////// static __global__ void MonteCarloOneBlockPerOption( hiprandState_t * __restrict rngStates, const __TOptionData * __restrict d_OptionData, __TOptionValue * __restrict d_CallValue, int pathN, int optionN) { const int SUM_N = THREAD_N; __shared__ real s_SumCall[SUM_N]; __shared__ real s_Sum2Call[SUM_N]; // determine global thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; // Copy random number state to local memory for efficiency hiprandState_t localState = rngStates[tid]; for(int optionIndex = blockIdx.x; optionIndex < optionN; optionIndex += gridDim.x) { const real S = d_OptionData[optionIndex].S; const real X = d_OptionData[optionIndex].X; const real MuByT = d_OptionData[optionIndex].MuByT; const real VBySqrtT = d_OptionData[optionIndex].VBySqrtT; //Cycle through the entire samples array: //derive end stock price for each path //accumulate partial integrals into intermediate shared memory buffer for (int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x) { __TOptionValue sumCall = {0, 0}; #pragma unroll 8 for (int i = iSum; i < pathN; i += SUM_N) { real r = hiprand_normal(&localState); real callValue = endCallValue(S, X, r, MuByT, VBySqrtT); sumCall.Expected += callValue; sumCall.Confidence += callValue * callValue; } s_SumCall[iSum] = sumCall.Expected; s_Sum2Call[iSum] = sumCall.Confidence; } //Reduce shared memory accumulators //and write final result 
to global memory sumReduce<real, SUM_N, THREAD_N>(s_SumCall, s_Sum2Call); if (threadIdx.x == 0) { __TOptionValue t = {s_SumCall[0], s_Sum2Call[0]}; d_CallValue[optionIndex] = t; } } } static __global__ void rngSetupStates( hiprandState_t *rngState, int device_id) { // determine global thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each threadblock gets different seed, // Threads within a threadblock get different sequence numbers hiprand_init(blockIdx.x + gridDim.x * device_id, threadIdx.x, 0, &rngState[tid]); } //////////////////////////////////////////////////////////////////////////////// // Host-side interface to GPU Monte Carlo //////////////////////////////////////////////////////////////////////////////// extern "C" void initMonteCarloGPU(TOptionPlan *plan) { checkCudaErrors(hipMalloc(&plan->d_OptionData, sizeof(__TOptionData)*(plan->optionCount))); checkCudaErrors(hipMalloc(&plan->d_CallValue, sizeof(__TOptionValue)*(plan->optionCount))); checkCudaErrors(hipHostMalloc(&plan->h_OptionData, sizeof(__TOptionData)*(plan->optionCount))); //Allocate internal device memory checkCudaErrors(hipHostMalloc(&plan->h_CallValue, sizeof(__TOptionValue)*(plan->optionCount))); //Allocate states for pseudo random number generators checkCudaErrors(hipMalloc((void **) &plan->rngStates, plan->gridSize * THREAD_N * sizeof(hiprandState_t))); // place each device pathN random numbers apart on the random number sequence hipLaunchKernelGGL(( rngSetupStates), dim3(plan->gridSize), dim3(THREAD_N), 0, 0, plan->rngStates, plan->device); getLastCudaError("rngSetupStates kernel failed.\n"); } //Compute statistics and deallocate internal device memory extern "C" void closeMonteCarloGPU(TOptionPlan *plan) { for (int i = 0; i < plan->optionCount; i++) { const double RT = plan->optionData[i].R * plan->optionData[i].T; const double sum = plan->h_CallValue[i].Expected; const double sum2 = plan->h_CallValue[i].Confidence; const double pathN = plan->pathN; //Derive average from the 
total sum and discount by riskfree rate plan->callValue[i].Expected = (float)(exp(-RT) * sum / pathN); //Standard deviation double stdDev = sqrt((pathN * sum2 - sum * sum)/ (pathN * (pathN - 1))); //Confidence width; in 95% of all cases theoretical value lies within these borders plan->callValue[i].Confidence = (float)(exp(-RT) * 1.96 * stdDev / sqrt(pathN)); } checkCudaErrors(hipFree(plan->rngStates)); checkCudaErrors(hipHostFree(plan->h_CallValue)); checkCudaErrors(hipHostFree(plan->h_OptionData)); checkCudaErrors(hipFree(plan->d_CallValue)); checkCudaErrors(hipFree(plan->d_OptionData)); } //Main computations extern "C" void MonteCarloGPU(TOptionPlan *plan, hipStream_t stream) { __TOptionValue *h_CallValue = plan->h_CallValue; if (plan->optionCount <= 0 || plan->optionCount > MAX_OPTIONS) { printf("MonteCarloGPU(): bad option count.\n"); return; } __TOptionData * h_OptionData = (__TOptionData *)plan->h_OptionData; for (int i = 0; i < plan->optionCount; i++) { const double T = plan->optionData[i].T; const double R = plan->optionData[i].R; const double V = plan->optionData[i].V; const double MuByT = (R - 0.5 * V * V) * T; const double VBySqrtT = V * sqrt(T); h_OptionData[i].S = (real)plan->optionData[i].S; h_OptionData[i].X = (real)plan->optionData[i].X; h_OptionData[i].MuByT = (real)MuByT; h_OptionData[i].VBySqrtT = (real)VBySqrtT; } checkCudaErrors(hipMemcpyAsync( plan->d_OptionData, h_OptionData, plan->optionCount * sizeof(__TOptionData), hipMemcpyHostToDevice, stream )); hipLaunchKernelGGL(( MonteCarloOneBlockPerOption), dim3(plan->gridSize), dim3(THREAD_N), 0, stream, plan->rngStates, (__TOptionData *)(plan->d_OptionData), (__TOptionValue *)(plan->d_CallValue), plan->pathN, plan->optionCount ); getLastCudaError("MonteCarloOneBlockPerOption() execution failed\n"); checkCudaErrors(hipMemcpyAsync( h_CallValue, plan->d_CallValue, plan->optionCount * sizeof(__TOptionValue), hipMemcpyDeviceToHost, stream )); //hipDeviceSynchronize(); }
2806d4bfff6d64c1501b314c3f894755c65b16f6.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //////////////////////////////////////////////////////////////////////////////// // Global types //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <helper_cuda.h> #include <curand_kernel.h> #include "MonteCarlo_common.h" //////////////////////////////////////////////////////////////////////////////// // Helper reduction template // Please see the "reduction" CUDA Sample for more information //////////////////////////////////////////////////////////////////////////////// #include "MonteCarlo_reduction.cuh" //////////////////////////////////////////////////////////////////////////////// // Internal GPU-side data structures //////////////////////////////////////////////////////////////////////////////// #define MAX_OPTIONS (1024*1024) //Preprocessed input option data typedef struct { real S; real X; real MuByT; real VBySqrtT; } __TOptionData; //////////////////////////////////////////////////////////////////////////////// // Overloaded shortcut payoff functions for different precision modes //////////////////////////////////////////////////////////////////////////////// __device__ inline float endCallValue(float S, float X, float r, float MuByT, float VBySqrtT) { float callValue = S * __expf(MuByT + VBySqrtT * r) - X; return (callValue > 0.0F) ? callValue : 0.0F; } __device__ inline double endCallValue(double S, double X, double r, double MuByT, double VBySqrtT) { double callValue = S * exp(MuByT + VBySqrtT * r) - X; return (callValue > 0.0) ? 
callValue : 0.0; } #define THREAD_N 256 //////////////////////////////////////////////////////////////////////////////// // This kernel computes the integral over all paths using a single thread block // per option. It is fastest when the number of thread blocks times the work per // block is high enough to keep the GPU busy. //////////////////////////////////////////////////////////////////////////////// static __global__ void MonteCarloOneBlockPerOption( curandState * __restrict rngStates, const __TOptionData * __restrict d_OptionData, __TOptionValue * __restrict d_CallValue, int pathN, int optionN) { const int SUM_N = THREAD_N; __shared__ real s_SumCall[SUM_N]; __shared__ real s_Sum2Call[SUM_N]; // determine global thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; // Copy random number state to local memory for efficiency curandState localState = rngStates[tid]; for(int optionIndex = blockIdx.x; optionIndex < optionN; optionIndex += gridDim.x) { const real S = d_OptionData[optionIndex].S; const real X = d_OptionData[optionIndex].X; const real MuByT = d_OptionData[optionIndex].MuByT; const real VBySqrtT = d_OptionData[optionIndex].VBySqrtT; //Cycle through the entire samples array: //derive end stock price for each path //accumulate partial integrals into intermediate shared memory buffer for (int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x) { __TOptionValue sumCall = {0, 0}; #pragma unroll 8 for (int i = iSum; i < pathN; i += SUM_N) { real r = curand_normal(&localState); real callValue = endCallValue(S, X, r, MuByT, VBySqrtT); sumCall.Expected += callValue; sumCall.Confidence += callValue * callValue; } s_SumCall[iSum] = sumCall.Expected; s_Sum2Call[iSum] = sumCall.Confidence; } //Reduce shared memory accumulators //and write final result to global memory sumReduce<real, SUM_N, THREAD_N>(s_SumCall, s_Sum2Call); if (threadIdx.x == 0) { __TOptionValue t = {s_SumCall[0], s_Sum2Call[0]}; d_CallValue[optionIndex] = t; } } } static __global__ void 
rngSetupStates( curandState *rngState, int device_id) { // determine global thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each threadblock gets different seed, // Threads within a threadblock get different sequence numbers curand_init(blockIdx.x + gridDim.x * device_id, threadIdx.x, 0, &rngState[tid]); } //////////////////////////////////////////////////////////////////////////////// // Host-side interface to GPU Monte Carlo //////////////////////////////////////////////////////////////////////////////// extern "C" void initMonteCarloGPU(TOptionPlan *plan) { checkCudaErrors(cudaMalloc(&plan->d_OptionData, sizeof(__TOptionData)*(plan->optionCount))); checkCudaErrors(cudaMalloc(&plan->d_CallValue, sizeof(__TOptionValue)*(plan->optionCount))); checkCudaErrors(cudaMallocHost(&plan->h_OptionData, sizeof(__TOptionData)*(plan->optionCount))); //Allocate internal device memory checkCudaErrors(cudaMallocHost(&plan->h_CallValue, sizeof(__TOptionValue)*(plan->optionCount))); //Allocate states for pseudo random number generators checkCudaErrors(cudaMalloc((void **) &plan->rngStates, plan->gridSize * THREAD_N * sizeof(curandState))); // place each device pathN random numbers apart on the random number sequence rngSetupStates<<<plan->gridSize, THREAD_N>>>(plan->rngStates, plan->device); getLastCudaError("rngSetupStates kernel failed.\n"); } //Compute statistics and deallocate internal device memory extern "C" void closeMonteCarloGPU(TOptionPlan *plan) { for (int i = 0; i < plan->optionCount; i++) { const double RT = plan->optionData[i].R * plan->optionData[i].T; const double sum = plan->h_CallValue[i].Expected; const double sum2 = plan->h_CallValue[i].Confidence; const double pathN = plan->pathN; //Derive average from the total sum and discount by riskfree rate plan->callValue[i].Expected = (float)(exp(-RT) * sum / pathN); //Standard deviation double stdDev = sqrt((pathN * sum2 - sum * sum)/ (pathN * (pathN - 1))); //Confidence width; in 95% of all cases 
theoretical value lies within these borders plan->callValue[i].Confidence = (float)(exp(-RT) * 1.96 * stdDev / sqrt(pathN)); } checkCudaErrors(cudaFree(plan->rngStates)); checkCudaErrors(cudaFreeHost(plan->h_CallValue)); checkCudaErrors(cudaFreeHost(plan->h_OptionData)); checkCudaErrors(cudaFree(plan->d_CallValue)); checkCudaErrors(cudaFree(plan->d_OptionData)); } //Main computations extern "C" void MonteCarloGPU(TOptionPlan *plan, cudaStream_t stream) { __TOptionValue *h_CallValue = plan->h_CallValue; if (plan->optionCount <= 0 || plan->optionCount > MAX_OPTIONS) { printf("MonteCarloGPU(): bad option count.\n"); return; } __TOptionData * h_OptionData = (__TOptionData *)plan->h_OptionData; for (int i = 0; i < plan->optionCount; i++) { const double T = plan->optionData[i].T; const double R = plan->optionData[i].R; const double V = plan->optionData[i].V; const double MuByT = (R - 0.5 * V * V) * T; const double VBySqrtT = V * sqrt(T); h_OptionData[i].S = (real)plan->optionData[i].S; h_OptionData[i].X = (real)plan->optionData[i].X; h_OptionData[i].MuByT = (real)MuByT; h_OptionData[i].VBySqrtT = (real)VBySqrtT; } checkCudaErrors(cudaMemcpyAsync( plan->d_OptionData, h_OptionData, plan->optionCount * sizeof(__TOptionData), cudaMemcpyHostToDevice, stream )); MonteCarloOneBlockPerOption<<<plan->gridSize, THREAD_N, 0, stream>>>( plan->rngStates, (__TOptionData *)(plan->d_OptionData), (__TOptionValue *)(plan->d_CallValue), plan->pathN, plan->optionCount ); getLastCudaError("MonteCarloOneBlockPerOption() execution failed\n"); checkCudaErrors(cudaMemcpyAsync( h_CallValue, plan->d_CallValue, plan->optionCount * sizeof(__TOptionValue), cudaMemcpyDeviceToHost, stream )); //cudaDeviceSynchronize(); }
2bd96bc78cc18aea4946076a7cb091bd76a41428.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <GPUMatrix/GPUMatrix.h> #include <stdio.h> namespace { __global__ void H() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } } namespace GPUMatrix { void HelloThreadIdx() {hipLaunchKernelGGL(( H), dim3(2), dim3(4), 0, 0, ); } } // namespace GPUMatrix
2bd96bc78cc18aea4946076a7cb091bd76a41428.cu
#include <GPUMatrix/GPUMatrix.h> #include <stdio.h> namespace { __global__ void H() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); } } namespace GPUMatrix { void HelloThreadIdx() { H<<<2, 4>>>(); } } // namespace GPUMatrix
6aa0e58ddacf72b7c2e313915e0820f54048e868.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "router.h" //#define wrap_ring(index, add, ring_size) (uint16_t) ((index + add) & (ring_size -1)) /*********************************************************************** * Static variables for the input/output files ***********************************************************************/ static FILE *routingTable; static FILE *inputFile; static FILE *outputFile; /*********************************************************************** * Variables related to intializing functions below ***********************************************************************/ int ec; // In this variable we save the error codes that produces some methods in "io.c" /** VARIABLES RELATED with the tables **/ short *mtable; // Main Table short *stable; // Second Table unsigned short extended_IPs; // Number of networks using the extended table. /** VARIABLES RELATED WITH THE TABLES INIZIALIZATION **/ long ip_index; uint32_t *IP_addr; int *aux_prefixLength; int *aux_outInterface; /*** VARIABLES RELACIONADAS CON LOOKUP */ //struct timeval start, end; /*** VARIABLES RELATED WITH THE PERFORMANCE INFORMATION*/ int *processedPackets; double *totalTableAccesses; double *totalPacketProcessingTime; /*** VARIABLES RELATED WITH GPU NF*/ short *d_mtable; short *d_stable; extern int *pkt_cnt; extern unsigned char *pktBuf; extern int *nbBoard; extern int *statusBoard; /*********************************************************************** * ip_v4 lookup function for GPU (18.08.31) ***********************************************************************/ __device__ void d_interface_lookup(uint32_t *IP_lookup, short *d_mtable, short *d_stable) { unsigned short interface; interface = d_mtable[*IP_lookup>>8]; if(interface>>16 != 0) { interface = d_stable[(interface & 0x7FFF)*256 + (*IP_lookup & 0x000000FF)]; } //printf("[CKJUNG]__interface: %d\n", interface); } 
/*********************************************************************** * GPU_NF#1: Router (18.09.17) ***********************************************************************/ __global__ void router(unsigned char * pktBuf, int *nbBoard, int *statusBoard, short* d_mtable, short* d_stable, int* pkt_cnt) { __shared__ uint8_t chapter_idx; unsigned char * buf = NULL; __shared__ int nb; if(threadIdx.x == 0){ chapter_idx = 0; nb = 0; } __syncthreads(); while(true) { //Persistent Kernel, trick for memory synch, access, blahblah? #if 1 ////////////////////////////////// NF's code Here /////////////////////////////////// __syncthreads(); if(threadIdx.x == 0 && statusBoard[chapter_idx] == -1){ nb = nbBoard[chapter_idx]; } __syncthreads(); if(threadIdx.x < nb){ //printf("[Router] tid: %d\n", threadIdx.x); buf = &pktBuf[chapter_idx * PKT_BATCH_SIZE + threadIdx.x * PKT_SIZE]; #if 1 struct iphdr* iph = (struct iphdr*)(buf + sizeof(struct ethhdr)); uint16_t* _daddr = (uint16_t*)&(iph->daddr); uint32_t daddr = 0; memcpy(&daddr, _daddr, 4); d_interface_lookup(&daddr, d_mtable, d_stable); #endif atomicAdd(&statusBoard[chapter_idx], 1); atomicAdd(pkt_cnt, 1); if(threadIdx.x == 0){ atomicAdd(&statusBoard[chapter_idx], 1); nb = 0; } } __syncthreads(); if(threadIdx.x == 0){ chapter_idx++; chapter_idx %= CHAPTER_NUM; } ////////////////////////////////// NF's code Here /////////////////////////////////// #endif } if(threadIdx.x == 0) printf("End of gpu_router!\n"); } /******************************************************************** * Initalize file descriptors * * routingTableName contains FIB info (argv[1] of main function) * inputFileName contains IP addresses (argv[2] of main function) * ***********************************************************************/ int initializeIO(char *routingTableName, char *inputFileName){ char outputFileName[100]; routingTable = fopen(routingTableName, "r"); printf("%s\n", routingTableName); if (routingTable == NULL) return ROUTING_TABLE_NOT_FOUND; 
inputFile = fopen(inputFileName, "r"); if (inputFile == NULL) { fclose(routingTable); return INPUT_FILE_NOT_FOUND; } sprintf(outputFileName, "%s%s", inputFileName, OUTPUT_NAME); outputFile = fopen(outputFileName, "w"); if (outputFile == NULL) { fclose(routingTable); fclose(inputFile); return CANNOT_CREATE_OUTPUT; } return OK; } /*********************************************************************** * Close the input/output files ***********************************************************************/ void freeIO() { fclose(inputFile); fclose(outputFile); fclose(routingTable); } /*********************************************************************** * Write explanation for error identifier (verbose mode) ***********************************************************************/ void printIOExplanationError(int result){ switch(result) { case ROUTING_TABLE_NOT_FOUND: printf("Routing table not found\n"); exit(0); case INPUT_FILE_NOT_FOUND: printf("Input file not found\n"); exit(0); case BAD_ROUTING_TABLE: printf("Bad routing table structure\n"); exit(0); case BAD_INPUT_FILE: printf("Bad input file structure\n"); exit(0); case PARSE_ERROR: printf("Parse error\n"); exit(0); case CANNOT_CREATE_OUTPUT: printf("Cannot create output file\n"); exit(0); case REACHED_EOF: printf("Reached End Of File\n"); exit(0); default: printf("Unknown error\n"); exit(0); } exit(0); } /*********************************************************************** * Read one entry in the FIB * * It should be noted that prefix, prefixLength and outInterface are * pointers since they are used as output parameters * ***********************************************************************/ int readFIBLine(uint32_t *prefix, int *prefixLength, int *outInterface){ int n[4], result; result = fscanf(routingTable, "%i.%i.%i.%i/%i\t%i\n", &n[0], &n[1], &n[2], &n[3], prefixLength, outInterface); // CKJUNG, 18.08.21 // printf("\nn0: %d, n1: %d, n2: %d, n3: %d\n", n[0], n[1], n[2], n[3]); // ~ CKJUNG if (result == 
EOF) return REACHED_EOF; else if (result != 6) return BAD_ROUTING_TABLE; else{ //remember that pentium architecture is little endian *prefix = (n[0]<<24) + (n[1]<<16) + (n[2]<<8) + n[3]; //*prefix = n[0]*pow(2,24) + n[1]*pow(2,16) + n[2]*pow(2,8) + n[3]; // CKJUNG, 18.08.21 //printf("prefix : %d\n", *prefix); // ~ CKJUNG return OK; } } /*********************************************************************** * Read one entry in the input packet file * * Again, it should be noted that IPAddress is a pointer since it is used * as output parameter * ***********************************************************************/ int readInputPacketFileLine(uint32_t *IPAddress){ int n[4], result; result = fscanf(inputFile, "%i.%i.%i.%i\n", &n[0], &n[1], &n[2], &n[3]); if (result == EOF) return REACHED_EOF; else if (result != 4) return BAD_INPUT_FILE; else{ //remember that pentium architecture is little endian *IPAddress = (n[0]<<24) + (n[1]<<16) + (n[2]<<8) + n[3]; //*IPAddress = n[0]*pow(2,24) + n[1]*pow(2,16) + n[2]*pow(2,8) + n[3]; return OK; } } /*********************************************************************** * Print a line to the output file * * gettimeofday(&initialTime, NULL) must be called right before the lookup function * * gettimeofday(&finalTime, NULL) must be called right after the lookup function * * The lookup function must return (either as output parameter or as return value) * the number of hash tables that have been accessed for every IP address * ***********************************************************************/ void printOutputLine(uint32_t IPAddress, int outInterface, struct timeval *initialTime, struct timeval *finalTime, double *searchingTime, int numberOfHashtables) { unsigned long sec, usec; usec = finalTime->tv_usec - initialTime->tv_usec; if (usec > finalTime->tv_usec) initialTime->tv_sec += 1; sec = finalTime->tv_sec - initialTime->tv_sec; *searchingTime = 1000000*sec + usec; //remember that output interface equals 0 means no matching 
//remember that if no matching but default route is specified in the FIB, the default output interface //must be stored to avoid dropping the packet (i.e., MISS) if (!outInterface){ fprintf(outputFile,"%i.%i.%i.%i;%s;%i;%.0lf\n",IPAddress >> 24, (IPAddress >> 16) & 0x000000ff, (IPAddress >> 8) & 0x000000ff, IPAddress & 0x000000ff , "MISS",numberOfHashtables, *searchingTime); } else{ fprintf(outputFile,"%i.%i.%i.%i;%i;%i;%.0lf\n",IPAddress >> 24, (IPAddress >> 16) & 0x000000ff, (IPAddress >> 8) & 0x000000ff, IPAddress & 0x000000ff , outInterface,numberOfHashtables, *searchingTime); } } /*********************************************************************** * Print memory and CPU time * * For more info: man getrusage * ***********************************************************************/ void printMemoryTimeUsage(){ float user_time, system_time; long int memory; struct rusage usage; if (getrusage (RUSAGE_SELF, &usage)){ printf("Resource measurement failed.\n"); } else{ user_time = (float)usage.ru_utime.tv_sec+(float)usage.ru_utime.tv_usec/1000000; system_time = (float)usage.ru_stime.tv_sec+(float)usage.ru_stime.tv_usec/1000000; memory = usage.ru_maxrss; fprintf(outputFile, "Memory (Kbytes) = %ld\n", memory ); fprintf(outputFile, "CPU Time (secs)= %.6f\n\n", user_time+system_time); } } /*********************************************************************** * Print execution summary to the output file * * It should be noted that: * *averageTableAccesses = totalTableAccesses/processedPackets * *averagePacketProcessingTime = totalPacketProcessingTime/processedPackets * ***********************************************************************/ void printSummary(int processedPackets, double averageTableAccesses, double averagePacketProcessingTime){ fprintf(outputFile, "\nPackets processed= %i\n", processedPackets); fprintf(outputFile, "Average table accesses= %.2lf\n", averageTableAccesses); fprintf(outputFile,"Average packet processing time (usecs)= %.2lf\n", 
averagePacketProcessingTime); printMemoryTimeUsage(); } void initializeFIB() { IP_addr = (uint32_t*)calloc(1,sizeof(int)); aux_prefixLength = (int*)calloc(1,sizeof(int)); aux_outInterface = (int*)calloc(1,sizeof(int)); //Now we have the prefix, the ip and the interface ec = readFIBLine(IP_addr, aux_prefixLength, aux_outInterface); while(ec == 0){ //WHILE NOT EOF OR ANOTHER TYPE OF ERROR long int number_of_hosts = 0; // We calculate the number of hosts affected by the mask // 2 24 - PREFIJO if(*aux_prefixLength <= 24){ number_of_hosts = pow(2,24 - *aux_prefixLength); for(ip_index = 0; ip_index < number_of_hosts; ip_index++) { mtable[(*IP_addr>>8) + ip_index] = *aux_outInterface; } } else{ number_of_hosts = pow(2,32 - *aux_prefixLength); if(mtable[*IP_addr>>8]>>15 == 0) { // 1. REALLOC MEMORY, we reserve 256 more chunks for the new interfaces stable = (short*)realloc(stable, 256*(extended_IPs + 1)*2); // 2. COPY FROM MTABLE TO STABLE // recorremos todo el rango de IP's del ultimo byte de la IP, copiando lo anterior for(ip_index = 0; ip_index <= 255; ip_index++) { stable[extended_IPs*256 + ip_index] = mtable[*IP_addr>>8]; } // 3. UPDATE MTABLE VALUE WITH THE INDEX OF STABLE // We write the "index" to the address in the stable and the bit 1 in the 16th position (0b1000000000000000) mtable[*IP_addr>>8] = extended_IPs | 0x8000; // 4. 
POPULATE THE STABLE CHUNK WITH THE SPECIFIED NEW ADDRESS for(ip_index = (*IP_addr & 0xFF); ip_index < number_of_hosts + (*IP_addr & 0xFF); ip_index++) { stable[extended_IPs*256 + ip_index] = *aux_outInterface; } extended_IPs++; } else{ // If it already exists a chunk for this Ip range inside stable for(ip_index = (*IP_addr & 0xFF); ip_index < number_of_hosts + (*IP_addr & 0xFF); ip_index++) { stable[(mtable[*IP_addr>>8] & 0x7FFF)*256 + ip_index] = *aux_outInterface; } } } //Now we get another IP, interface and interface ec = readFIBLine(IP_addr,aux_prefixLength,aux_outInterface); } free(IP_addr); free(aux_prefixLength); free(aux_outInterface); } /** * [Look for an IP address inside the main table and secundary table stored in RAM] * Input: *IP_lookup * Output. *interface *ntables */ void interface_lookup(uint32_t *IP_lookup, short int *ntables,unsigned short *interface) { *interface = mtable[*IP_lookup>>8]; if(*interface>>15 == 0) { *ntables = 1; return; } else { *ntables = 2; *interface = stable[(*interface & 0x7FFF)*256 + (*IP_lookup & 0x000000FF)]; // 0x7fff = 0b0111111111111111 to adquire just the address to the 2nd table return; } return; } /** * [Perform routing process, going through the file and looking for the best Interface for each IP] * * Output: *processedPackets *totalTableAccesses *totalPacketProcessingTime */ void compute_routes() { uint32_t *IP_lookup = (uint32_t*)calloc(1,sizeof(uint32_t)); unsigned short *interface = (unsigned short*)calloc(1,sizeof(unsigned short)); double *searching_time = (double*)calloc(1,sizeof(double)); short int *number_of_tables = (short int*)calloc(1,sizeof(short int)); ec = readInputPacketFileLine(IP_lookup); while(ec == 0) { //gettimeofday(&start, NULL); interface_lookup(IP_lookup,number_of_tables, interface); //gettimeofday(&end, NULL); //printOutputLine(*IP_lookup, *interface, &start, &end,searching_time, *number_of_tables); *processedPackets = *processedPackets + 1; *totalTableAccesses = *totalTableAccesses + 
*number_of_tables; *totalPacketProcessingTime = *totalPacketProcessingTime + *searching_time; ec = readInputPacketFileLine(IP_lookup); } free(IP_lookup); free(interface); free(searching_time); free(number_of_tables); } extern "C" void initialize_router(void) { // CKJUNG, 18.08.22 [NF #1:IP lookup] Setting RIB ///////////////////////////////////////////////////// //short *d_mtable; //short *d_stable; printf("____[Initialize]__NF #1__Router__\n"); ASSERTRT(hipMalloc((void**)&d_mtable, MTABLE_ENTRIES_LENGTH*sizeof(short))); ASSERT_CUDA(hipMemset(d_mtable, 0, MTABLE_ENTRIES_LENGTH*sizeof(short))); mtable = (short*)calloc(MTABLE_ENTRIES_LENGTH, sizeof(short)); processedPackets = (int*)calloc(1, sizeof(int)); totalTableAccesses = (double*)calloc(1, sizeof(double)); totalPacketProcessingTime = (double*)calloc(1, sizeof(double)); ec = 0; extended_IPs = 0; ec = initializeIO((char*)"./apps/lib/ck_table", (char*)"./apps/lib/p2"); // Initialize Input if(ec != 0){ printf("\nERROR: \n\t"); printIOExplanationError(ec); //return -1; } initializeFIB(); // CKJUNG, size of stable is fixed after initializeFIB ASSERTRT(hipMalloc((void**)&d_stable, 256*(extended_IPs + 1)*2)); ASSERT_CUDA(hipMemset(d_stable, 0, 256*(extended_IPs + 1)*2)); printf("[CKJUNG] initializeFIB() done.\n"); #if 1 // CKJUNG, copy Routing table from DRAM --> GDDR, Here! hipError_t mtable_err = hipMemcpy(d_mtable, mtable, MTABLE_ENTRIES_LENGTH*sizeof(short), hipMemcpyHostToDevice); hipError_t stable_err = hipMemcpy(d_stable, stable, 256*(extended_IPs + 1)*2, hipMemcpyHostToDevice); if(mtable_err != hipSuccess || stable_err != hipSuccess) { printf("[Error] hipMemcpy for \"mtable\" or \"stable\" has failed.\n"); }else{ START_GRN printf("[Router] Routing table (m-table & s-table) is ready.\n"); END } #endif compute_routes(); printf("[CKJUNG] compute_routes() done. 
please check \"[InputFileName].out\"\n"); // printSummary(*processedPackets, (*totalTableAccesses / *processedPackets), (*totalPacketProcessingTime / *processedPackets)); freeIO();/* Freeing Resources */ free(mtable); free(stable); free(processedPackets); free(totalTableAccesses); free(totalPacketProcessingTime); hipStream_t cuda_stream2; ASSERT_CUDA(hipStreamCreateWithFlags(&cuda_stream2,hipStreamNonBlocking)); printf("NF#1: Router\n"); hipLaunchKernelGGL(( router), dim3(ROUTER_TB_NUM), dim3(ROUTER_T_NUM), 0, cuda_stream2 , pktBuf, nbBoard, statusBoard, d_mtable, d_stable, pkt_cnt); START_GRN printf("[Done]____[Initialize]__NF #1__Router__\n"); END // hipDeviceSynchronize(); //return 0; } void finalize_router(void) { ASSERT_CUDA(hipFree(d_mtable)); ASSERT_CUDA(hipFree(d_stable)); }
6aa0e58ddacf72b7c2e313915e0820f54048e868.cu
#include "router.h" //#define wrap_ring(index, add, ring_size) (uint16_t) ((index + add) & (ring_size -1)) /*********************************************************************** * Static variables for the input/output files ***********************************************************************/ static FILE *routingTable; static FILE *inputFile; static FILE *outputFile; /*********************************************************************** * Variables related to intializing functions below ***********************************************************************/ int ec; // In this variable we save the error codes that produces some methods in "io.c" /** VARIABLES RELATED with the tables **/ short *mtable; // Main Table short *stable; // Second Table unsigned short extended_IPs; // Number of networks using the extended table. /** VARIABLES RELATED WITH THE TABLES INIZIALIZATION **/ long ip_index; uint32_t *IP_addr; int *aux_prefixLength; int *aux_outInterface; /*** VARIABLES RELACIONADAS CON LOOKUP */ //struct timeval start, end; /*** VARIABLES RELATED WITH THE PERFORMANCE INFORMATION*/ int *processedPackets; double *totalTableAccesses; double *totalPacketProcessingTime; /*** VARIABLES RELATED WITH GPU NF*/ short *d_mtable; short *d_stable; extern int *pkt_cnt; extern unsigned char *pktBuf; extern int *nbBoard; extern int *statusBoard; /*********************************************************************** * ip_v4 lookup function for GPU (18.08.31) ***********************************************************************/ __device__ void d_interface_lookup(uint32_t *IP_lookup, short *d_mtable, short *d_stable) { unsigned short interface; interface = d_mtable[*IP_lookup>>8]; if(interface>>16 != 0) { interface = d_stable[(interface & 0x7FFF)*256 + (*IP_lookup & 0x000000FF)]; } //printf("[CKJUNG]__interface: %d\n", interface); } /*********************************************************************** * GPU_NF#1: Router (18.09.17) 
***********************************************************************/ __global__ void router(unsigned char * pktBuf, int *nbBoard, int *statusBoard, short* d_mtable, short* d_stable, int* pkt_cnt) { __shared__ uint8_t chapter_idx; unsigned char * buf = NULL; __shared__ int nb; if(threadIdx.x == 0){ chapter_idx = 0; nb = 0; } __syncthreads(); while(true) { //Persistent Kernel, trick for memory synch, access, blahblah? #if 1 ////////////////////////////////// NF's code Here /////////////////////////////////// __syncthreads(); if(threadIdx.x == 0 && statusBoard[chapter_idx] == -1){ nb = nbBoard[chapter_idx]; } __syncthreads(); if(threadIdx.x < nb){ //printf("[Router] tid: %d\n", threadIdx.x); buf = &pktBuf[chapter_idx * PKT_BATCH_SIZE + threadIdx.x * PKT_SIZE]; #if 1 struct iphdr* iph = (struct iphdr*)(buf + sizeof(struct ethhdr)); uint16_t* _daddr = (uint16_t*)&(iph->daddr); uint32_t daddr = 0; memcpy(&daddr, _daddr, 4); d_interface_lookup(&daddr, d_mtable, d_stable); #endif atomicAdd(&statusBoard[chapter_idx], 1); atomicAdd(pkt_cnt, 1); if(threadIdx.x == 0){ atomicAdd(&statusBoard[chapter_idx], 1); nb = 0; } } __syncthreads(); if(threadIdx.x == 0){ chapter_idx++; chapter_idx %= CHAPTER_NUM; } ////////////////////////////////// NF's code Here /////////////////////////////////// #endif } if(threadIdx.x == 0) printf("End of gpu_router!\n"); } /******************************************************************** * Initalize file descriptors * * routingTableName contains FIB info (argv[1] of main function) * inputFileName contains IP addresses (argv[2] of main function) * ***********************************************************************/ int initializeIO(char *routingTableName, char *inputFileName){ char outputFileName[100]; routingTable = fopen(routingTableName, "r"); printf("%s\n", routingTableName); if (routingTable == NULL) return ROUTING_TABLE_NOT_FOUND; inputFile = fopen(inputFileName, "r"); if (inputFile == NULL) { fclose(routingTable); return 
INPUT_FILE_NOT_FOUND; } sprintf(outputFileName, "%s%s", inputFileName, OUTPUT_NAME); outputFile = fopen(outputFileName, "w"); if (outputFile == NULL) { fclose(routingTable); fclose(inputFile); return CANNOT_CREATE_OUTPUT; } return OK; } /*********************************************************************** * Close the input/output files ***********************************************************************/ void freeIO() { fclose(inputFile); fclose(outputFile); fclose(routingTable); } /*********************************************************************** * Write explanation for error identifier (verbose mode) ***********************************************************************/ void printIOExplanationError(int result){ switch(result) { case ROUTING_TABLE_NOT_FOUND: printf("Routing table not found\n"); exit(0); case INPUT_FILE_NOT_FOUND: printf("Input file not found\n"); exit(0); case BAD_ROUTING_TABLE: printf("Bad routing table structure\n"); exit(0); case BAD_INPUT_FILE: printf("Bad input file structure\n"); exit(0); case PARSE_ERROR: printf("Parse error\n"); exit(0); case CANNOT_CREATE_OUTPUT: printf("Cannot create output file\n"); exit(0); case REACHED_EOF: printf("Reached End Of File\n"); exit(0); default: printf("Unknown error\n"); exit(0); } exit(0); } /*********************************************************************** * Read one entry in the FIB * * It should be noted that prefix, prefixLength and outInterface are * pointers since they are used as output parameters * ***********************************************************************/ int readFIBLine(uint32_t *prefix, int *prefixLength, int *outInterface){ int n[4], result; result = fscanf(routingTable, "%i.%i.%i.%i/%i\t%i\n", &n[0], &n[1], &n[2], &n[3], prefixLength, outInterface); // CKJUNG, 18.08.21 // printf("\nn0: %d, n1: %d, n2: %d, n3: %d\n", n[0], n[1], n[2], n[3]); // ~ CKJUNG if (result == EOF) return REACHED_EOF; else if (result != 6) return BAD_ROUTING_TABLE; else{ //remember 
that pentium architecture is little endian *prefix = (n[0]<<24) + (n[1]<<16) + (n[2]<<8) + n[3]; //*prefix = n[0]*pow(2,24) + n[1]*pow(2,16) + n[2]*pow(2,8) + n[3]; // CKJUNG, 18.08.21 //printf("prefix : %d\n", *prefix); // ~ CKJUNG return OK; } } /*********************************************************************** * Read one entry in the input packet file * * Again, it should be noted that IPAddress is a pointer since it is used * as output parameter * ***********************************************************************/ int readInputPacketFileLine(uint32_t *IPAddress){ int n[4], result; result = fscanf(inputFile, "%i.%i.%i.%i\n", &n[0], &n[1], &n[2], &n[3]); if (result == EOF) return REACHED_EOF; else if (result != 4) return BAD_INPUT_FILE; else{ //remember that pentium architecture is little endian *IPAddress = (n[0]<<24) + (n[1]<<16) + (n[2]<<8) + n[3]; //*IPAddress = n[0]*pow(2,24) + n[1]*pow(2,16) + n[2]*pow(2,8) + n[3]; return OK; } } /*********************************************************************** * Print a line to the output file * * gettimeofday(&initialTime, NULL) must be called right before the lookup function * * gettimeofday(&finalTime, NULL) must be called right after the lookup function * * The lookup function must return (either as output parameter or as return value) * the number of hash tables that have been accessed for every IP address * ***********************************************************************/ void printOutputLine(uint32_t IPAddress, int outInterface, struct timeval *initialTime, struct timeval *finalTime, double *searchingTime, int numberOfHashtables) { unsigned long sec, usec; usec = finalTime->tv_usec - initialTime->tv_usec; if (usec > finalTime->tv_usec) initialTime->tv_sec += 1; sec = finalTime->tv_sec - initialTime->tv_sec; *searchingTime = 1000000*sec + usec; //remember that output interface equals 0 means no matching //remember that if no matching but default route is specified in the FIB, the default 
output interface //must be stored to avoid dropping the packet (i.e., MISS) if (!outInterface){ fprintf(outputFile,"%i.%i.%i.%i;%s;%i;%.0lf\n",IPAddress >> 24, (IPAddress >> 16) & 0x000000ff, (IPAddress >> 8) & 0x000000ff, IPAddress & 0x000000ff , "MISS",numberOfHashtables, *searchingTime); } else{ fprintf(outputFile,"%i.%i.%i.%i;%i;%i;%.0lf\n",IPAddress >> 24, (IPAddress >> 16) & 0x000000ff, (IPAddress >> 8) & 0x000000ff, IPAddress & 0x000000ff , outInterface,numberOfHashtables, *searchingTime); } } /*********************************************************************** * Print memory and CPU time * * For more info: man getrusage * ***********************************************************************/ void printMemoryTimeUsage(){ float user_time, system_time; long int memory; struct rusage usage; if (getrusage (RUSAGE_SELF, &usage)){ printf("Resource measurement failed.\n"); } else{ user_time = (float)usage.ru_utime.tv_sec+(float)usage.ru_utime.tv_usec/1000000; system_time = (float)usage.ru_stime.tv_sec+(float)usage.ru_stime.tv_usec/1000000; memory = usage.ru_maxrss; fprintf(outputFile, "Memory (Kbytes) = %ld\n", memory ); fprintf(outputFile, "CPU Time (secs)= %.6f\n\n", user_time+system_time); } } /*********************************************************************** * Print execution summary to the output file * * It should be noted that: * *averageTableAccesses = totalTableAccesses/processedPackets * *averagePacketProcessingTime = totalPacketProcessingTime/processedPackets * ***********************************************************************/ void printSummary(int processedPackets, double averageTableAccesses, double averagePacketProcessingTime){ fprintf(outputFile, "\nPackets processed= %i\n", processedPackets); fprintf(outputFile, "Average table accesses= %.2lf\n", averageTableAccesses); fprintf(outputFile,"Average packet processing time (usecs)= %.2lf\n", averagePacketProcessingTime); printMemoryTimeUsage(); } void initializeFIB() { IP_addr = 
(uint32_t*)calloc(1,sizeof(int)); aux_prefixLength = (int*)calloc(1,sizeof(int)); aux_outInterface = (int*)calloc(1,sizeof(int)); //Now we have the prefix, the ip and the interface ec = readFIBLine(IP_addr, aux_prefixLength, aux_outInterface); while(ec == 0){ //WHILE NOT EOF OR ANOTHER TYPE OF ERROR long int number_of_hosts = 0; // We calculate the number of hosts affected by the mask // 2 24 - PREFIJO if(*aux_prefixLength <= 24){ number_of_hosts = pow(2,24 - *aux_prefixLength); for(ip_index = 0; ip_index < number_of_hosts; ip_index++) { mtable[(*IP_addr>>8) + ip_index] = *aux_outInterface; } } else{ number_of_hosts = pow(2,32 - *aux_prefixLength); if(mtable[*IP_addr>>8]>>15 == 0) { // 1. REALLOC MEMORY, we reserve 256 more chunks for the new interfaces stable = (short*)realloc(stable, 256*(extended_IPs + 1)*2); // 2. COPY FROM MTABLE TO STABLE // recorremos todo el rango de IP's del ultimo byte de la IP, copiando lo anterior for(ip_index = 0; ip_index <= 255; ip_index++) { stable[extended_IPs*256 + ip_index] = mtable[*IP_addr>>8]; } // 3. UPDATE MTABLE VALUE WITH THE INDEX OF STABLE // We write the "index" to the address in the stable and the bit 1 in the 16th position (0b1000000000000000) mtable[*IP_addr>>8] = extended_IPs | 0x8000; // 4. 
POPULATE THE STABLE CHUNK WITH THE SPECIFIED NEW ADDRESS for(ip_index = (*IP_addr & 0xFF); ip_index < number_of_hosts + (*IP_addr & 0xFF); ip_index++) { stable[extended_IPs*256 + ip_index] = *aux_outInterface; } extended_IPs++; } else{ // If it already exists a chunk for this Ip range inside stable for(ip_index = (*IP_addr & 0xFF); ip_index < number_of_hosts + (*IP_addr & 0xFF); ip_index++) { stable[(mtable[*IP_addr>>8] & 0x7FFF)*256 + ip_index] = *aux_outInterface; } } } //Now we get another IP, interface and interface ec = readFIBLine(IP_addr,aux_prefixLength,aux_outInterface); } free(IP_addr); free(aux_prefixLength); free(aux_outInterface); } /** * [Look for an IP address inside the main table and secundary table stored in RAM] * Input: *IP_lookup * Output. *interface *ntables */ void interface_lookup(uint32_t *IP_lookup, short int *ntables,unsigned short *interface) { *interface = mtable[*IP_lookup>>8]; if(*interface>>15 == 0) { *ntables = 1; return; } else { *ntables = 2; *interface = stable[(*interface & 0x7FFF)*256 + (*IP_lookup & 0x000000FF)]; // 0x7fff = 0b0111111111111111 to adquire just the address to the 2nd table return; } return; } /** * [Perform routing process, going through the file and looking for the best Interface for each IP] * * Output: *processedPackets *totalTableAccesses *totalPacketProcessingTime */ void compute_routes() { uint32_t *IP_lookup = (uint32_t*)calloc(1,sizeof(uint32_t)); unsigned short *interface = (unsigned short*)calloc(1,sizeof(unsigned short)); double *searching_time = (double*)calloc(1,sizeof(double)); short int *number_of_tables = (short int*)calloc(1,sizeof(short int)); ec = readInputPacketFileLine(IP_lookup); while(ec == 0) { //gettimeofday(&start, NULL); interface_lookup(IP_lookup,number_of_tables, interface); //gettimeofday(&end, NULL); //printOutputLine(*IP_lookup, *interface, &start, &end,searching_time, *number_of_tables); *processedPackets = *processedPackets + 1; *totalTableAccesses = *totalTableAccesses + 
*number_of_tables; *totalPacketProcessingTime = *totalPacketProcessingTime + *searching_time; ec = readInputPacketFileLine(IP_lookup); } free(IP_lookup); free(interface); free(searching_time); free(number_of_tables); } extern "C" void initialize_router(void) { // CKJUNG, 18.08.22 [NF #1:IP lookup] Setting RIB ///////////////////////////////////////////////////// //short *d_mtable; //short *d_stable; printf("____[Initialize]__NF #1__Router__\n"); ASSERTRT(cudaMalloc((void**)&d_mtable, MTABLE_ENTRIES_LENGTH*sizeof(short))); ASSERT_CUDA(cudaMemset(d_mtable, 0, MTABLE_ENTRIES_LENGTH*sizeof(short))); mtable = (short*)calloc(MTABLE_ENTRIES_LENGTH, sizeof(short)); processedPackets = (int*)calloc(1, sizeof(int)); totalTableAccesses = (double*)calloc(1, sizeof(double)); totalPacketProcessingTime = (double*)calloc(1, sizeof(double)); ec = 0; extended_IPs = 0; ec = initializeIO((char*)"./apps/lib/ck_table", (char*)"./apps/lib/p2"); // Initialize Input if(ec != 0){ printf("\nERROR: \n\t"); printIOExplanationError(ec); //return -1; } initializeFIB(); // CKJUNG, size of stable is fixed after initializeFIB ASSERTRT(cudaMalloc((void**)&d_stable, 256*(extended_IPs + 1)*2)); ASSERT_CUDA(cudaMemset(d_stable, 0, 256*(extended_IPs + 1)*2)); printf("[CKJUNG] initializeFIB() done.\n"); #if 1 // CKJUNG, copy Routing table from DRAM --> GDDR, Here! cudaError_t mtable_err = cudaMemcpy(d_mtable, mtable, MTABLE_ENTRIES_LENGTH*sizeof(short), cudaMemcpyHostToDevice); cudaError_t stable_err = cudaMemcpy(d_stable, stable, 256*(extended_IPs + 1)*2, cudaMemcpyHostToDevice); if(mtable_err != cudaSuccess || stable_err != cudaSuccess) { printf("[Error] cudaMemcpy for \"mtable\" or \"stable\" has failed.\n"); }else{ START_GRN printf("[Router] Routing table (m-table & s-table) is ready.\n"); END } #endif compute_routes(); printf("[CKJUNG] compute_routes() done. 
please check \"[InputFileName].out\"\n"); // printSummary(*processedPackets, (*totalTableAccesses / *processedPackets), (*totalPacketProcessingTime / *processedPackets)); freeIO();/* Freeing Resources */ free(mtable); free(stable); free(processedPackets); free(totalTableAccesses); free(totalPacketProcessingTime); cudaStream_t cuda_stream2; ASSERT_CUDA(cudaStreamCreateWithFlags(&cuda_stream2,cudaStreamNonBlocking)); printf("NF#1: Router\n"); router<<< ROUTER_TB_NUM, ROUTER_T_NUM, 0, cuda_stream2 >>> (pktBuf, nbBoard, statusBoard, d_mtable, d_stable, pkt_cnt); START_GRN printf("[Done]____[Initialize]__NF #1__Router__\n"); END // cudaDeviceSynchronize(); //return 0; } void finalize_router(void) { ASSERT_CUDA(cudaFree(d_mtable)); ASSERT_CUDA(cudaFree(d_stable)); }
7e50d845151a0fc9013ef5bcc83055d2f2dd2c7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //matrix multiply using shared memory for optimization #include <cassert> #include <cstdlib> #include <ctime> #include <random> #include <iostream> #define BSZ 128 #define TSZ 16 #define SZ (BSZ * TSZ) #define TT double using namespace std; default_random_engine& get_default_random_engine(){ static default_random_engine eng(time(0)); return eng; } template <typename T> void random_matrix(T* m, size_t sz){ uniform_real_distribution<T> dist(-100.F, 100.F); default_random_engine& eng = get_default_random_engine(); for (size_t i = 0; i < sz; ++i) m[i] = dist(eng); } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } template <typename T> struct CudaMtx { T* data; size_t rows; size_t cols; size_t stride; }; template <typename T> struct Mtx { public: T* data; size_t rows; size_t cols; bool is_cuda; Mtx(bool is_cuda, size_t rows, size_t cols): data(nullptr), rows(rows), cols(cols), is_cuda(is_cuda) { if (is_cuda) { gpuErrchk(hipMalloc(&data, sizeof(T) * rows * cols)); } else data = new T[rows * cols]; } ~Mtx(){ if (is_cuda) { gpuErrchk(hipFree(data)); } else delete[] data; } CudaMtx<T> cuda_mtx(){ assert(is_cuda); CudaMtx<T> ret; ret.data = data; ret.rows = rows; ret.cols = ret.stride = cols; return ret; } }; template <typename T> __device__ T get_elem(CudaMtx<T>& a, size_t i, size_t j){ return a.data[i * a.stride + j]; } template <typename T> __device__ void set_elem(CudaMtx<T>& a, size_t i, size_t j, T val){ a.data[i * a.stride + j] = val; } template <typename T> __device__ CudaMtx<T> sub_matrix_stride(CudaMtx<T>& m, size_t row_stride, size_t col_stride){ CudaMtx<T> ret; ret.data = &m.data[m.cols * TSZ * row_stride + TSZ * col_stride]; ret.rows = ret.cols = 
TSZ; ret.stride = m.stride; return ret; } template <typename T> __global__ void matrix_multiply_cuda_v2(CudaMtx<T> c, CudaMtx<T> a, CudaMtx<T> b){ size_t bx = blockIdx.x, by = blockIdx.y; CudaMtx<T> csub = sub_matrix_stride(c, bx, by); T cval = 0.; size_t row = threadIdx.x, col = threadIdx.y; for (size_t i = 0; i < BSZ; ++i){ CudaMtx<T> asub = sub_matrix_stride(a, bx, i); CudaMtx<T> bsub = sub_matrix_stride(b, i, by); __shared__ T amem[TSZ][TSZ]; __shared__ T bmem[TSZ][TSZ]; amem[row][col] = get_elem(asub, row, col); bmem[row][col] = get_elem(bsub, row, col); __syncthreads(); for (size_t j = 0; j < TSZ; ++j) cval += amem[row][j] * bmem[j][col]; __syncthreads(); } set_elem(csub, row, col, cval); } template <typename T> clock_t matrix_multiply_v1(Mtx<T>& c, Mtx<T>& a, Mtx<T>& b){ for (size_t i = 0; i < c.rows; ++i) for (size_t j = 0; j < c.cols; ++j){ c.data[i * c.cols + j] = 0.; for (size_t k = 0; k < a.cols; ++k) c.data[i * c.cols + j] += a.data[i * a.cols + k] * b.data[k * b.cols + j]; } return clock(); } int main(){ Mtx<TT> c(false, SZ, SZ), a(false, SZ, SZ), b(false, SZ, SZ), d(false, SZ, SZ); Mtx<TT> dc(true, SZ, SZ), da(true, SZ, SZ), db(true, SZ, SZ); random_matrix(a.data, SZ * SZ); random_matrix(b.data, SZ * SZ); clock_t timing_start = clock(); gpuErrchk(hipMemcpy(da.data, a.data, sizeof(TT) * SZ * SZ, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(db.data, b.data, sizeof(TT) * SZ * SZ, hipMemcpyHostToDevice)); dim3 dblock(BSZ, BSZ); dim3 dthread(TSZ, TSZ); hipLaunchKernelGGL(( matrix_multiply_cuda_v2), dim3(dblock), dim3(dthread), 0, 0, dc.cuda_mtx(), da.cuda_mtx(), db.cuda_mtx()); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(c.data, dc.data, sizeof(TT) * SZ * SZ, hipMemcpyDeviceToHost)); cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; timing_start = clock(); clock_t timing_end = matrix_multiply_v1(d, a, b); cout << "CPU time: " << (timing_end - timing_start) / 
(double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; size_t mismatch = 0; for (size_t i = 0; i < SZ * SZ; ++i) if (fabs(c.data[i] - d.data[i]) / d.data[i] > 5e-3F){ cout << "difference: " << (fabs(c.data[i] - d.data[i]) / d.data[i]) << endl; mismatch++; break; } if (mismatch == 0) cout << "All values match" << endl; else cout << mismatch << " differences" << endl; }
7e50d845151a0fc9013ef5bcc83055d2f2dd2c7b.cu
//matrix multiply using shared memory for optimization #include <cassert> #include <cstdlib> #include <ctime> #include <random> #include <iostream> #define BSZ 128 #define TSZ 16 #define SZ (BSZ * TSZ) #define TT double using namespace std; default_random_engine& get_default_random_engine(){ static default_random_engine eng(time(0)); return eng; } template <typename T> void random_matrix(T* m, size_t sz){ uniform_real_distribution<T> dist(-100.F, 100.F); default_random_engine& eng = get_default_random_engine(); for (size_t i = 0; i < sz; ++i) m[i] = dist(eng); } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } template <typename T> struct CudaMtx { T* data; size_t rows; size_t cols; size_t stride; }; template <typename T> struct Mtx { public: T* data; size_t rows; size_t cols; bool is_cuda; Mtx(bool is_cuda, size_t rows, size_t cols): data(nullptr), rows(rows), cols(cols), is_cuda(is_cuda) { if (is_cuda) { gpuErrchk(cudaMalloc(&data, sizeof(T) * rows * cols)); } else data = new T[rows * cols]; } ~Mtx(){ if (is_cuda) { gpuErrchk(cudaFree(data)); } else delete[] data; } CudaMtx<T> cuda_mtx(){ assert(is_cuda); CudaMtx<T> ret; ret.data = data; ret.rows = rows; ret.cols = ret.stride = cols; return ret; } }; template <typename T> __device__ T get_elem(CudaMtx<T>& a, size_t i, size_t j){ return a.data[i * a.stride + j]; } template <typename T> __device__ void set_elem(CudaMtx<T>& a, size_t i, size_t j, T val){ a.data[i * a.stride + j] = val; } template <typename T> __device__ CudaMtx<T> sub_matrix_stride(CudaMtx<T>& m, size_t row_stride, size_t col_stride){ CudaMtx<T> ret; ret.data = &m.data[m.cols * TSZ * row_stride + TSZ * col_stride]; ret.rows = ret.cols = TSZ; ret.stride = m.stride; return ret; } template <typename T> __global__ void 
matrix_multiply_cuda_v2(CudaMtx<T> c, CudaMtx<T> a, CudaMtx<T> b){ size_t bx = blockIdx.x, by = blockIdx.y; CudaMtx<T> csub = sub_matrix_stride(c, bx, by); T cval = 0.; size_t row = threadIdx.x, col = threadIdx.y; for (size_t i = 0; i < BSZ; ++i){ CudaMtx<T> asub = sub_matrix_stride(a, bx, i); CudaMtx<T> bsub = sub_matrix_stride(b, i, by); __shared__ T amem[TSZ][TSZ]; __shared__ T bmem[TSZ][TSZ]; amem[row][col] = get_elem(asub, row, col); bmem[row][col] = get_elem(bsub, row, col); __syncthreads(); for (size_t j = 0; j < TSZ; ++j) cval += amem[row][j] * bmem[j][col]; __syncthreads(); } set_elem(csub, row, col, cval); } template <typename T> clock_t matrix_multiply_v1(Mtx<T>& c, Mtx<T>& a, Mtx<T>& b){ for (size_t i = 0; i < c.rows; ++i) for (size_t j = 0; j < c.cols; ++j){ c.data[i * c.cols + j] = 0.; for (size_t k = 0; k < a.cols; ++k) c.data[i * c.cols + j] += a.data[i * a.cols + k] * b.data[k * b.cols + j]; } return clock(); } int main(){ Mtx<TT> c(false, SZ, SZ), a(false, SZ, SZ), b(false, SZ, SZ), d(false, SZ, SZ); Mtx<TT> dc(true, SZ, SZ), da(true, SZ, SZ), db(true, SZ, SZ); random_matrix(a.data, SZ * SZ); random_matrix(b.data, SZ * SZ); clock_t timing_start = clock(); gpuErrchk(cudaMemcpy(da.data, a.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(db.data, b.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice)); dim3 dblock(BSZ, BSZ); dim3 dthread(TSZ, TSZ); matrix_multiply_cuda_v2<<<dblock, dthread>>>(dc.cuda_mtx(), da.cuda_mtx(), db.cuda_mtx()); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(c.data, dc.data, sizeof(TT) * SZ * SZ, cudaMemcpyDeviceToHost)); cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; timing_start = clock(); clock_t timing_end = matrix_multiply_v1(d, a, b); cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; size_t mismatch = 0; for (size_t i = 0; i < SZ * SZ; ++i) if 
(fabs(c.data[i] - d.data[i]) / d.data[i] > 5e-3F){ cout << "difference: " << (fabs(c.data[i] - d.data[i]) / d.data[i]) << endl; mismatch++; break; } if (mismatch == 0) cout << "All values match" << endl; else cout << mismatch << " differences" << endl; }
eb69bba04d070defc1fcd50dc1eeae0d8667710f.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #include "open3d/core/Dispatch.h" #include "open3d/core/Dtype.h" #include "open3d/core/MemoryManager.h" #include "open3d/core/ParallelFor.h" #include "open3d/core/SizeVector.h" #include "open3d/core/Tensor.h" #include "open3d/core/hashmap/CUDA/StdGPUHashBackend.h" #include "open3d/core/hashmap/DeviceHashBackend.h" #include "open3d/core/hashmap/Dispatch.h" #include "open3d/core/hashmap/HashMap.h" #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/geometry/kernel/GeometryMacros.h" #include "open3d/t/geometry/kernel/VoxelBlockGrid.h" #include "open3d/t/geometry/kernel/VoxelBlockGridImpl.h" #include "open3d/utility/Logging.h" namespace open3d { namespace t { namespace geometry { namespace kernel { namespace voxel_grid { struct Coord3i { OPEN3D_HOST_DEVICE Coord3i(index_t x, index_t y, index_t z) : x_(x), y_(y), z_(z) {} OPEN3D_HOST_DEVICE bool operator==(const Coord3i &other) const { return x_ == other.x_ && y_ == other.y_ && z_ == other.z_; } index_t x_; index_t y_; index_t z_; }; void PointCloudTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &points, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc) { index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; index_t n = points.GetLength(); const float *pcd_ptr = static_cast<const float *>(points.GetDataPtr()); core::Device device = points.GetDevice(); core::Tensor block_coordi({8 * n, 3}, core::Int32, device); index_t *block_coordi_ptr = static_cast<index_t *>(block_coordi.GetDataPtr()); core::Tensor 
count(std::vector<index_t>{0}, {}, core::Int32, device); index_t *count_ptr = static_cast<index_t *>(count.GetDataPtr()); core::ParallelFor(hashmap->GetDevice(), n, [=] OPEN3D_DEVICE(index_t workload_idx) { float x = pcd_ptr[3 * workload_idx + 0]; float y = pcd_ptr[3 * workload_idx + 1]; float z = pcd_ptr[3 * workload_idx + 2]; index_t xb_lo = static_cast<index_t>( floorf((x - sdf_trunc) / block_size)); index_t xb_hi = static_cast<index_t>( floorf((x + sdf_trunc) / block_size)); index_t yb_lo = static_cast<index_t>( floorf((y - sdf_trunc) / block_size)); index_t yb_hi = static_cast<index_t>( floorf((y + sdf_trunc) / block_size)); index_t zb_lo = static_cast<index_t>( floorf((z - sdf_trunc) / block_size)); index_t zb_hi = static_cast<index_t>( floorf((z + sdf_trunc) / block_size)); for (index_t xb = xb_lo; xb <= xb_hi; ++xb) { for (index_t yb = yb_lo; yb <= yb_hi; ++yb) { for (index_t zb = zb_lo; zb <= zb_hi; ++zb) { index_t idx = atomicAdd(count_ptr, 1); block_coordi_ptr[3 * idx + 0] = xb; block_coordi_ptr[3 * idx + 1] = yb; block_coordi_ptr[3 * idx + 2] = zb; } } } }); index_t total_block_count = count.Item<index_t>(); if (total_block_count == 0) { utility::LogError( "No block is touched in TSDF volume, abort integration. 
Please " "check specified parameters, especially depth_scale and " "voxel_size"); } block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_buf_indices, block_masks; hashmap->Activate(block_coordi.Slice(0, 0, count.Item<index_t>()), block_buf_indices, block_masks); voxel_block_coords = block_coordi.IndexGet({block_masks}); } void DepthTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &depth, const core::Tensor &intrinsic, const core::Tensor &extrinsic, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc, float depth_scale, float depth_max, index_t stride) { core::Device device = depth.GetDevice(); NDArrayIndexer depth_indexer(depth, 2); core::Tensor pose = t::geometry::InverseTransformation(extrinsic); TransformIndexer ti(intrinsic, pose, 1.0f); // Output index_t rows_strided = depth_indexer.GetShape(0) / stride; index_t cols_strided = depth_indexer.GetShape(1) / stride; index_t n = rows_strided * cols_strided; const index_t step_size = 3; const index_t est_multipler_factor = (step_size + 1); static core::Tensor block_coordi; if (block_coordi.GetLength() != est_multipler_factor * n) { block_coordi = core::Tensor({est_multipler_factor * n, 3}, core::Dtype::Int32, device); } // Counter core::Tensor count(std::vector<index_t>{0}, {1}, core::Dtype::Int32, device); index_t *count_ptr = count.GetDataPtr<index_t>(); index_t *block_coordi_ptr = block_coordi.GetDataPtr<index_t>(); index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() { core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) { index_t y = (workload_idx / cols_strided) * stride; index_t x = (workload_idx % cols_strided) * stride; float d = *depth_indexer.GetDataPtr<scalar_t>(x, y) / depth_scale; if (d > 0 && d < depth_max) { float x_c = 0, y_c = 0, z_c = 0; ti.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0, 
&x_c, &y_c, &z_c); float x_g = 0, y_g = 0, z_g = 0; ti.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); // Origin float x_o = 0, y_o = 0, z_o = 0; ti.GetCameraPosition(&x_o, &y_o, &z_o); // Direction float x_d = x_g - x_o; float y_d = y_g - y_o; float z_d = z_g - z_o; const float t_min = max(d - sdf_trunc, 0.0); const float t_max = min(d + sdf_trunc, depth_max); const float t_step = (t_max - t_min) / step_size; float t = t_min; index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, (step_size + 1)); for (index_t step = 0; step <= step_size; ++step) { index_t offset = (step + idx) * 3; index_t xb = static_cast<index_t>( floorf((x_o + t * x_d) / block_size)); index_t yb = static_cast<index_t>( floorf((y_o + t * y_d) / block_size)); index_t zb = static_cast<index_t>( floorf((z_o + t * z_d) / block_size)); block_coordi_ptr[offset + 0] = xb; block_coordi_ptr[offset + 1] = yb; block_coordi_ptr[offset + 2] = zb; t += t_step; } } }); }); index_t total_block_count = static_cast<index_t>(count[0].Item<index_t>()); if (total_block_count == 0) { utility::LogError( "No block is touched in TSDF volume, abort integration. 
Please " "check specified parameters, especially depth_scale and " "voxel_size"); } total_block_count = ::min(total_block_count, static_cast<index_t>(hashmap->GetCapacity())); block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_addrs, block_masks; hashmap->Activate(block_coordi, block_addrs, block_masks); // Customized IndexGet (generic version too slow) voxel_block_coords = core::Tensor({hashmap->Size(), 3}, core::Int32, device); index_t *voxel_block_coord_ptr = voxel_block_coords.GetDataPtr<index_t>(); bool *block_masks_ptr = block_masks.GetDataPtr<bool>(); count[0] = 0; core::ParallelFor(device, total_block_count, [=] OPEN3D_DEVICE(index_t workload_idx) { if (block_masks_ptr[workload_idx]) { index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); index_t offset_lhs = 3 * idx; index_t offset_rhs = 3 * workload_idx; voxel_block_coord_ptr[offset_lhs + 0] = block_coordi_ptr[offset_rhs + 0]; voxel_block_coord_ptr[offset_lhs + 1] = block_coordi_ptr[offset_rhs + 1]; voxel_block_coord_ptr[offset_lhs + 2] = block_coordi_ptr[offset_rhs + 2]; } }); OPEN3D_CUDA_CHECK(hipDeviceSynchronize()); } #define FN_ARGUMENTS \ const core::Tensor &depth, const core::Tensor &color, \ const core::Tensor &indices, const core::Tensor &block_keys, \ TensorMap &block_values, const core::Tensor &depth_intrinsic, \ const core::Tensor &color_intrinsic, \ const core::Tensor &extrinsic, index_t resolution, \ float voxel_size, float sdf_trunc, float depth_scale, \ float depth_max template void IntegrateCUDA<uint16_t, uint8_t, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<uint16_t, uint8_t, float, float, float>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ std::shared_ptr<core::HashMap> &hashmap, const TensorMap &block_value_map, \ const core::Tensor &range_map, TensorMap 
&renderings_map, \ const core::Tensor &intrinsic, const core::Tensor &extrinsic, \ index_t h, index_t w, index_t block_resolution, float voxel_size, \ float depth_scale, float depth_min, float depth_max, \ float weight_threshold, float trunc_voxel_multiplier, \ int range_map_down_factor template void RayCastCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void RayCastCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const TensorMap &block_value_map, \ core::Tensor &points, core::Tensor &normals, core::Tensor &colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &valid_size template void ExtractPointCloudCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void ExtractPointCloudCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS void ExtractTriangleMeshCUDA(const core::Tensor &block_indices, const core::Tensor &inv_block_indices, const core::Tensor &nb_block_indices, const core::Tensor &nb_block_masks, const core::Tensor &block_keys, const std::vector<core::Tensor> &block_values, core::Tensor &vertices, core::Tensor &triangles, core::Tensor &vertex_normals, core::Tensor &vertex_colors, index_t block_resolution, float voxel_size, float weight_threshold, index_t &vertex_count); #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &inv_block_indices, \ const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const TensorMap &block_value_map, \ core::Tensor &vertices, core::Tensor &triangles, \ core::Tensor &vertex_normals, core::Tensor &vertex_colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &vertex_count template void ExtractTriangleMeshCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void 
ExtractTriangleMeshCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS } // namespace voxel_grid } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
eb69bba04d070defc1fcd50dc1eeae0d8667710f.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #include "open3d/core/Dispatch.h" #include "open3d/core/Dtype.h" #include "open3d/core/MemoryManager.h" #include "open3d/core/ParallelFor.h" #include "open3d/core/SizeVector.h" #include "open3d/core/Tensor.h" #include "open3d/core/hashmap/CUDA/StdGPUHashBackend.h" #include "open3d/core/hashmap/DeviceHashBackend.h" #include "open3d/core/hashmap/Dispatch.h" #include "open3d/core/hashmap/HashMap.h" #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/geometry/kernel/GeometryMacros.h" #include "open3d/t/geometry/kernel/VoxelBlockGrid.h" #include "open3d/t/geometry/kernel/VoxelBlockGridImpl.h" #include "open3d/utility/Logging.h" namespace open3d { namespace t { namespace geometry { namespace kernel { namespace voxel_grid { struct Coord3i { OPEN3D_HOST_DEVICE Coord3i(index_t x, index_t y, index_t z) : x_(x), y_(y), z_(z) {} OPEN3D_HOST_DEVICE bool operator==(const Coord3i &other) const { return x_ == other.x_ && y_ == other.y_ && z_ == other.z_; } index_t x_; index_t y_; index_t z_; }; void PointCloudTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &points, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc) { index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; index_t n = points.GetLength(); const float *pcd_ptr = static_cast<const float *>(points.GetDataPtr()); core::Device device = points.GetDevice(); core::Tensor block_coordi({8 * n, 3}, core::Int32, device); index_t *block_coordi_ptr = static_cast<index_t *>(block_coordi.GetDataPtr()); core::Tensor count(std::vector<index_t>{0}, {}, core::Int32, device); 
index_t *count_ptr = static_cast<index_t *>(count.GetDataPtr()); core::ParallelFor(hashmap->GetDevice(), n, [=] OPEN3D_DEVICE(index_t workload_idx) { float x = pcd_ptr[3 * workload_idx + 0]; float y = pcd_ptr[3 * workload_idx + 1]; float z = pcd_ptr[3 * workload_idx + 2]; index_t xb_lo = static_cast<index_t>( floorf((x - sdf_trunc) / block_size)); index_t xb_hi = static_cast<index_t>( floorf((x + sdf_trunc) / block_size)); index_t yb_lo = static_cast<index_t>( floorf((y - sdf_trunc) / block_size)); index_t yb_hi = static_cast<index_t>( floorf((y + sdf_trunc) / block_size)); index_t zb_lo = static_cast<index_t>( floorf((z - sdf_trunc) / block_size)); index_t zb_hi = static_cast<index_t>( floorf((z + sdf_trunc) / block_size)); for (index_t xb = xb_lo; xb <= xb_hi; ++xb) { for (index_t yb = yb_lo; yb <= yb_hi; ++yb) { for (index_t zb = zb_lo; zb <= zb_hi; ++zb) { index_t idx = atomicAdd(count_ptr, 1); block_coordi_ptr[3 * idx + 0] = xb; block_coordi_ptr[3 * idx + 1] = yb; block_coordi_ptr[3 * idx + 2] = zb; } } } }); index_t total_block_count = count.Item<index_t>(); if (total_block_count == 0) { utility::LogError( "No block is touched in TSDF volume, abort integration. 
Please " "check specified parameters, especially depth_scale and " "voxel_size"); } block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_buf_indices, block_masks; hashmap->Activate(block_coordi.Slice(0, 0, count.Item<index_t>()), block_buf_indices, block_masks); voxel_block_coords = block_coordi.IndexGet({block_masks}); } void DepthTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &depth, const core::Tensor &intrinsic, const core::Tensor &extrinsic, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc, float depth_scale, float depth_max, index_t stride) { core::Device device = depth.GetDevice(); NDArrayIndexer depth_indexer(depth, 2); core::Tensor pose = t::geometry::InverseTransformation(extrinsic); TransformIndexer ti(intrinsic, pose, 1.0f); // Output index_t rows_strided = depth_indexer.GetShape(0) / stride; index_t cols_strided = depth_indexer.GetShape(1) / stride; index_t n = rows_strided * cols_strided; const index_t step_size = 3; const index_t est_multipler_factor = (step_size + 1); static core::Tensor block_coordi; if (block_coordi.GetLength() != est_multipler_factor * n) { block_coordi = core::Tensor({est_multipler_factor * n, 3}, core::Dtype::Int32, device); } // Counter core::Tensor count(std::vector<index_t>{0}, {1}, core::Dtype::Int32, device); index_t *count_ptr = count.GetDataPtr<index_t>(); index_t *block_coordi_ptr = block_coordi.GetDataPtr<index_t>(); index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() { core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) { index_t y = (workload_idx / cols_strided) * stride; index_t x = (workload_idx % cols_strided) * stride; float d = *depth_indexer.GetDataPtr<scalar_t>(x, y) / depth_scale; if (d > 0 && d < depth_max) { float x_c = 0, y_c = 0, z_c = 0; ti.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0, 
&x_c, &y_c, &z_c); float x_g = 0, y_g = 0, z_g = 0; ti.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); // Origin float x_o = 0, y_o = 0, z_o = 0; ti.GetCameraPosition(&x_o, &y_o, &z_o); // Direction float x_d = x_g - x_o; float y_d = y_g - y_o; float z_d = z_g - z_o; const float t_min = max(d - sdf_trunc, 0.0); const float t_max = min(d + sdf_trunc, depth_max); const float t_step = (t_max - t_min) / step_size; float t = t_min; index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, (step_size + 1)); for (index_t step = 0; step <= step_size; ++step) { index_t offset = (step + idx) * 3; index_t xb = static_cast<index_t>( floorf((x_o + t * x_d) / block_size)); index_t yb = static_cast<index_t>( floorf((y_o + t * y_d) / block_size)); index_t zb = static_cast<index_t>( floorf((z_o + t * z_d) / block_size)); block_coordi_ptr[offset + 0] = xb; block_coordi_ptr[offset + 1] = yb; block_coordi_ptr[offset + 2] = zb; t += t_step; } } }); }); index_t total_block_count = static_cast<index_t>(count[0].Item<index_t>()); if (total_block_count == 0) { utility::LogError( "No block is touched in TSDF volume, abort integration. 
Please " "check specified parameters, especially depth_scale and " "voxel_size"); } total_block_count = std::min(total_block_count, static_cast<index_t>(hashmap->GetCapacity())); block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_addrs, block_masks; hashmap->Activate(block_coordi, block_addrs, block_masks); // Customized IndexGet (generic version too slow) voxel_block_coords = core::Tensor({hashmap->Size(), 3}, core::Int32, device); index_t *voxel_block_coord_ptr = voxel_block_coords.GetDataPtr<index_t>(); bool *block_masks_ptr = block_masks.GetDataPtr<bool>(); count[0] = 0; core::ParallelFor(device, total_block_count, [=] OPEN3D_DEVICE(index_t workload_idx) { if (block_masks_ptr[workload_idx]) { index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); index_t offset_lhs = 3 * idx; index_t offset_rhs = 3 * workload_idx; voxel_block_coord_ptr[offset_lhs + 0] = block_coordi_ptr[offset_rhs + 0]; voxel_block_coord_ptr[offset_lhs + 1] = block_coordi_ptr[offset_rhs + 1]; voxel_block_coord_ptr[offset_lhs + 2] = block_coordi_ptr[offset_rhs + 2]; } }); OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); } #define FN_ARGUMENTS \ const core::Tensor &depth, const core::Tensor &color, \ const core::Tensor &indices, const core::Tensor &block_keys, \ TensorMap &block_values, const core::Tensor &depth_intrinsic, \ const core::Tensor &color_intrinsic, \ const core::Tensor &extrinsic, index_t resolution, \ float voxel_size, float sdf_trunc, float depth_scale, \ float depth_max template void IntegrateCUDA<uint16_t, uint8_t, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<uint16_t, uint8_t, float, float, float>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ std::shared_ptr<core::HashMap> &hashmap, const TensorMap &block_value_map, \ const core::Tensor &range_map, TensorMap 
&renderings_map, \ const core::Tensor &intrinsic, const core::Tensor &extrinsic, \ index_t h, index_t w, index_t block_resolution, float voxel_size, \ float depth_scale, float depth_min, float depth_max, \ float weight_threshold, float trunc_voxel_multiplier, \ int range_map_down_factor template void RayCastCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void RayCastCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const TensorMap &block_value_map, \ core::Tensor &points, core::Tensor &normals, core::Tensor &colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &valid_size template void ExtractPointCloudCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void ExtractPointCloudCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS void ExtractTriangleMeshCUDA(const core::Tensor &block_indices, const core::Tensor &inv_block_indices, const core::Tensor &nb_block_indices, const core::Tensor &nb_block_masks, const core::Tensor &block_keys, const std::vector<core::Tensor> &block_values, core::Tensor &vertices, core::Tensor &triangles, core::Tensor &vertex_normals, core::Tensor &vertex_colors, index_t block_resolution, float voxel_size, float weight_threshold, index_t &vertex_count); #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &inv_block_indices, \ const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const TensorMap &block_value_map, \ core::Tensor &vertices, core::Tensor &triangles, \ core::Tensor &vertex_normals, core::Tensor &vertex_colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &vertex_count template void ExtractTriangleMeshCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void 
ExtractTriangleMeshCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS } // namespace voxel_grid } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
dc06bc5ecf4cb81aedb4f2c1f9ee4cd952043318.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? 
static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ void DetectionForwardKernel(DType *out, DType *outanchor,const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; outanchor += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; outanchor[pos * 6 ]=out[pos * 6 ]; outanchor[pos * 6 + 1]=out[pos * 6 + 1]; outanchor[pos * 6 + 2]=out[pos * 6 + 2]; outanchor[pos * 6 + 3]=out[pos * 6 + 3]; outanchor[pos * 6 + 4]=out[pos * 6 + 4]; outanchor[pos * 6 + 5]=out[pos * 6 + 5]; } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? 
src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<cpu, 3, DType> &outanchor, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; 
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_, outanchor.dptr_ cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(hipPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
dc06bc5ecf4cb81aedb4f2c1f9ee4cd952043318.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template<typename DType> __device__ void Clip(DType *value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template<typename DType> __device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? 
static_cast<DType>(0) : static_cast<DType>(i / u); } template<typename DType> __global__ void DetectionForwardKernel(DType *out, DType *outanchor,const DType *cls_prob, const DType *loc_pred, const DType *anchors, DType *temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; outanchor += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; outanchor[pos * 6 ]=out[pos * 6 ]; outanchor[pos * 6 + 1]=out[pos * 6 + 1]; outanchor[pos * 6 + 2]=out[pos * 6 + 2]; outanchor[pos * 6 + 3]=out[pos * 6 + 3]; outanchor[pos * 6 + 4]=out[pos * 6 + 4]; outanchor[pos * 6 + 5]=out[pos * 6 + 5]; } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType *src = out; DType *dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? 
src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out? temp_space : out; dst = dst == out? temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType *compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template<typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out, const Tensor<cpu, 3, DType> &outanchor, const Tensor<gpu, 3, DType> &cls_prob, const Tensor<gpu, 2, DType> &loc_pred, const Tensor<gpu, 2, DType> &anchors, const Tensor<gpu, 3, DType> &temp_space, const float threshold, const bool clip, const nnvm::Tuple<float> &variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; 
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_, outanchor.dptr_ cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
70fa8b7160a69f06f73de9bc941d1e2b1d055966.hip
// !!! This is a file automatically generated by hipify!!! #include "texture_hip.cuh" rtDeclareVariable(int, nx, , ); rtDeclareVariable(int, ny, , ); rtDeclareVariable(int, nn, , ); rtTextureSampler<float4, 2> data; RT_CALLABLE_PROGRAM float3 sampleTexture(float u, float v, float3 p) { return make_float3(tex2D(data, u, v)); }
70fa8b7160a69f06f73de9bc941d1e2b1d055966.cu
#include "texture.cuh" rtDeclareVariable(int, nx, , ); rtDeclareVariable(int, ny, , ); rtDeclareVariable(int, nn, , ); rtTextureSampler<float4, 2> data; RT_CALLABLE_PROGRAM float3 sampleTexture(float u, float v, float3 p) { return make_float3(tex2D(data, u, v)); }
083621c68f36e2d9a9a8db39eadaf74029f7f0fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" } extern "C" void forward_deconvolutional_layer_gpu(layer l, network net) { int i; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i=0; i<l.batch; ++i) { float *a = l.weights_gpu; float *b = net.input_gpu + i*l.c*l.h*l.w; float *c = net.workspace; gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation); } extern "C" void backward_deconvolutional_layer_gpu(layer l, network net) { int i; //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize) { backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i=0; i<l.batch; ++i) { int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = net.input_gpu + i*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu; im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu) { int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = net.workspace; float *c = net.delta_gpu + i*n*m; 
gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } extern "C" void pull_deconvolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize) { cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } extern "C" void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize) { cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam) { adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu) { adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } } else { axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 
1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu) { axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
083621c68f36e2d9a9a8db39eadaf74029f7f0fa.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" } extern "C" void forward_deconvolutional_layer_gpu(layer l, network net) { int i; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i=0; i<l.batch; ++i) { float *a = l.weights_gpu; float *b = net.input_gpu + i*l.c*l.h*l.w; float *c = net.workspace; gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation); } extern "C" void backward_deconvolutional_layer_gpu(layer l, network net) { int i; //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize) { backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i=0; i<l.batch; ++i) { int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = net.input_gpu + i*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu; im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu) { int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = net.workspace; float *c = net.delta_gpu + i*n*m; gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } extern "C" void pull_deconvolutional_layer(layer l) { 
cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize) { cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } extern "C" void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize) { cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam) { adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu) { adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } } else { axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu) { axpy_gpu(l.n, 
learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
399aefe88cbee76728ab61b8855a98473cc5e65c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Lesson2.h" #include <cudaDefs.h> #include <cassert> namespace lesson2 { const size_t Rows = 15; const size_t Cols = 20; const size_t BlockSize = 3; __global__ void fill(int* matrix, size_t rows, size_t cols, size_t pitch) { int row = blockIdx.x * BlockSize + threadIdx.x; int col = blockIdx.y * BlockSize + threadIdx.y; if (row >= rows || col >= cols) return; int index = row * pitch + col; int value = col * rows + row; //printf("r=%-5d c=%-5d index=%-5d v=%-5d\n", row, col, index, value); matrix[index] = value; } __global__ void increment(int* matrix, size_t rows, size_t cols, size_t pitch) { int row = blockIdx.x * BlockSize + threadIdx.x; int col = blockIdx.y * BlockSize + threadIdx.y; if (row >= rows || col >= cols) return; int index = row * pitch + col; int value = col * rows + row; matrix[index]++; } template<typename T> bool arraysEqual(T *a, T *b, size_t length) { for (size_t i = 0; i < length; i++) if (a[i] != b[i]) return false; return true; } void run() { int *dMatrix; size_t pitchInBytes = 0; checkCudaErrors(hipMallocPitch((void**)&dMatrix, &pitchInBytes, Cols * sizeof(int), Rows)); size_t pitch = pitchInBytes / sizeof(int); dim3 grid = dim3(getNumberOfParts(Rows, BlockSize), getNumberOfParts(Cols, BlockSize)); dim3 block = dim3(BlockSize, BlockSize); fill << <grid, block >> > (dMatrix, Rows, Cols, pitch); checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix"); increment << <grid, block >> > (dMatrix, Rows, Cols, pitch); checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix"); int *expectedMatrix = new int[Rows * Cols]; for (size_t i = 0; i < Rows * Cols; i++) expectedMatrix[i] = i + 1; int *matrix = new int[pitch * Rows]; checkCudaErrors(hipMemcpy2D(matrix, pitchInBytes, dMatrix, pitchInBytes, Cols * sizeof(int), Rows, hipMemcpyKind::hipMemcpyDeviceToHost)); checkHostMatrix(matrix, pitchInBytes, Rows, Cols, "%-3d ", 
"matrix"); //assert(arraysEqual(expectedMatrix, matrix, Rows * Cols)); delete[] matrix; delete[] expectedMatrix; hipFree(dMatrix); } }
399aefe88cbee76728ab61b8855a98473cc5e65c.cu
#include "Lesson2.h" #include <cudaDefs.h> #include <cassert> namespace lesson2 { const size_t Rows = 15; const size_t Cols = 20; const size_t BlockSize = 3; __global__ void fill(int* matrix, size_t rows, size_t cols, size_t pitch) { int row = blockIdx.x * BlockSize + threadIdx.x; int col = blockIdx.y * BlockSize + threadIdx.y; if (row >= rows || col >= cols) return; int index = row * pitch + col; int value = col * rows + row; //printf("r=%-5d c=%-5d index=%-5d v=%-5d\n", row, col, index, value); matrix[index] = value; } __global__ void increment(int* matrix, size_t rows, size_t cols, size_t pitch) { int row = blockIdx.x * BlockSize + threadIdx.x; int col = blockIdx.y * BlockSize + threadIdx.y; if (row >= rows || col >= cols) return; int index = row * pitch + col; int value = col * rows + row; matrix[index]++; } template<typename T> bool arraysEqual(T *a, T *b, size_t length) { for (size_t i = 0; i < length; i++) if (a[i] != b[i]) return false; return true; } void run() { int *dMatrix; size_t pitchInBytes = 0; checkCudaErrors(cudaMallocPitch((void**)&dMatrix, &pitchInBytes, Cols * sizeof(int), Rows)); size_t pitch = pitchInBytes / sizeof(int); dim3 grid = dim3(getNumberOfParts(Rows, BlockSize), getNumberOfParts(Cols, BlockSize)); dim3 block = dim3(BlockSize, BlockSize); fill << <grid, block >> > (dMatrix, Rows, Cols, pitch); checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix"); increment << <grid, block >> > (dMatrix, Rows, Cols, pitch); checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix"); int *expectedMatrix = new int[Rows * Cols]; for (size_t i = 0; i < Rows * Cols; i++) expectedMatrix[i] = i + 1; int *matrix = new int[pitch * Rows]; checkCudaErrors(cudaMemcpy2D(matrix, pitchInBytes, dMatrix, pitchInBytes, Cols * sizeof(int), Rows, cudaMemcpyKind::cudaMemcpyDeviceToHost)); checkHostMatrix(matrix, pitchInBytes, Rows, Cols, "%-3d ", "matrix"); //assert(arraysEqual(expectedMatrix, matrix, Rows * Cols)); delete[] matrix; 
delete[] expectedMatrix; cudaFree(dMatrix); } }
7e08ad05943c093d3302c89dd07223d73fc395ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Second CUDA implemenation of Prim's Minimum Spanning Tree Algorithm // // // Please refer to the report for documentation on all the data structures used // here, as well as an outline of the implementation. #include "cuda1_prim.hpp" // // Kernel implementing the first phase of min reduction primitive // // local block minima are stored in a temporary array v_red // __global__ void min_reduction1(uint32_t *inbound, uint32_t *weights, uint2 *v_red, uint32_t num_vertices) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; extern __shared__ uint2 shm[]; // Initial assignment of shared memory -> of node is note reachable the weight is set to +inf shm[threadIdx.x].x = idx; shm[threadIdx.x + blockDim.x].x = idx + blockDim.x; shm[threadIdx.x].y = idx < num_vertices && inbound[idx] > num_vertices ? weights[idx] : UINT32_MAX; shm[threadIdx.x + blockDim.x].y = UINT32_MAX; // reduction loop for (int j = blockDim.x * SHM_FACTOR; j > 1; j /= 2) { for (int k = 0; k < SHM_FACTOR; k++) { if (shm[threadIdx.x].y > shm[threadIdx.x + j / 2].y) { shm[threadIdx.x].x = shm[threadIdx.x + j / 2].x; shm[threadIdx.x].y = shm[threadIdx.x + j / 2].y; } } __syncthreads(); } // store best local solution in temporary array if (threadIdx.x == 0) { v_red[blockIdx.x].x = shm[0].x; v_red[blockIdx.x].y = shm[0].y; } } // // Kernel implementing the second phase of min reduction primitive // // temporary reduction array v_red is reduced and best solution stored in v_red[0] // __global__ void min_reduction2(uint2 *v_red, uint32_t *current_node, uint32_t *last_node, uint32_t red1_blocks) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; // if only one block available -> best solution is already in v_red[0] if (red1_blocks == 1) { if (idx == 1) { *last_node = *current_node; *current_node = v_red[0].x; } return; } uint32_t half_size = red1_blocks / 2; // reduction loop for (int j = half_size; j > 1; j /= 
2) { for (int i = 0; i < j; i += blockDim.x) { if (idx + i < j) { if (v_red[idx + i + j].y < v_red[idx + i].y) { v_red[idx + i].x = v_red[idx + i + j].x; v_red[idx + i].y = v_red[idx + i + j].y; } } } __syncthreads(); } // adjust current and last nodes to best result if (idx == 0) { *last_node = *current_node; if (v_red[1].y < v_red[0].y) { *current_node = v_red[1].x; } else { *current_node = v_red[0].x; } } } // // Kernel implementing the first weight update phase primitive // // Uses the compact adjacency list as read-only input, and writes to the MST // data structure. // // Each thread accesses only one "row" of the MST data structure, so there is // no need to synchronize anything. // // The position in the solution array is the corresponding inbound node of the new nodes reachable from current_node // __global__ void update_mst(uint2 *outbound_vertices, uint2 *inbound_vertices, uint32_t *outbound, uint32_t *weights, uint32_t *current_node) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; uint32_t start_index = outbound_vertices[*current_node].y; uint32_t end_index = start_index + outbound_vertices[*current_node].x; if (idx < end_index - start_index) { uint32_t edge_idx = idx + start_index; if (inbound_vertices[edge_idx].y < weights[inbound_vertices[edge_idx].x]) { weights[inbound_vertices[edge_idx].x] = inbound_vertices[edge_idx].y; outbound[inbound_vertices[edge_idx].x] = *current_node; } } } // // Kernel implementing the second weight update phase primitive // // Take the best fitting edge and store it store it at x-th position in the solution array. 
x is the last node found // __global__ void update_mst2(uint32_t *outbound, uint32_t *inbound, uint32_t *weights, uint32_t *current_node, uint32_t *last_node) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx == 0) { outbound[*last_node] = outbound[*current_node]; inbound[*last_node] = *current_node; weights[*last_node] = weights[*current_node]; weights[*current_node] = UINT32_MAX; } } // // Initialize the compact adjacency list representation (Wang et al.) // // Refer to the report for a detailed explanation of this data structure. // // The input graph is generated using our own graph generator, which can be // found in base/. // void cuda1Setup(const Graph& g, uint2 *&inbound_vertices, uint2 *&outbound_vertices) { uint32_t pos = 0; for (uint32_t v = 0; v < g.num_vertices(); ++v) { std::vector<EdgeTarget> neighbors; g.neighbors(v, neighbors); outbound_vertices[v].x = neighbors.size(); outbound_vertices[v].y = v == 0 ? 0 : v == 1 ? outbound_vertices[v - 1].x : outbound_vertices[v - 1].y + outbound_vertices[v - 1].x; for (auto nb = neighbors.begin(); nb < neighbors.end(); ++nb) { inbound_vertices[pos].x = nb->vertex_to; inbound_vertices[pos++].y = nb->weight; } } } // allocates all resources needed on the device void allocate_resources(uint32_t num_vertices, uint32_t num_edges, uint2 *& inbound_vertices, uint2 *& outbound_vertices, uint2 *& d_inbound_vertices, uint2 *& d_outbound_vertices, uint2 *&d_red_array, uint32_t *outbound, uint32_t *inbound, uint32_t *weights, uint32_t current_node, uint32_t *&d_outbound, uint32_t *&d_inbound, uint32_t *&d_weights, uint32_t *&d_current_node, uint32_t *&d_last_node, uint32_t num_blocks) { hipMalloc(&d_inbound_vertices, num_edges * 2 * sizeof(uint2)); hipMalloc(&d_outbound_vertices, num_vertices * sizeof(uint2)); hipMalloc(&d_outbound, num_vertices * sizeof(uint32_t)); hipMalloc(&d_inbound, num_vertices * sizeof(uint32_t)); hipMalloc(&d_weights, num_vertices * sizeof(uint32_t)); hipMalloc(&d_current_node, 
sizeof(uint32_t)); hipMalloc(&d_last_node, sizeof(uint32_t)); hipMalloc(&d_red_array, num_blocks * sizeof(uint2)); hipMemcpy(d_inbound_vertices, inbound_vertices, num_edges * 2 * sizeof(uint2), hipMemcpyHostToDevice); hipMemcpy(d_outbound_vertices, outbound_vertices, num_vertices * sizeof(uint2), hipMemcpyHostToDevice); hipMemcpy(d_outbound, outbound, num_vertices * sizeof(uint32_t), hipMemcpyHostToDevice); hipMemcpy(d_inbound, inbound, num_vertices * sizeof(uint32_t), hipMemcpyHostToDevice); hipMemcpy(d_weights, weights, num_vertices * sizeof(uint32_t), hipMemcpyHostToDevice); hipMemcpy(d_current_node, &current_node, sizeof(uint32_t), hipMemcpyHostToDevice); } // frees all allocated resources on the device void free_resources(uint2 *& d_inbound_vertices, uint2 *& d_outbound_vertices, uint2 *&d_red_array, uint32_t *&d_outbound, uint32_t *&d_inbound, uint32_t *&d_weights, uint32_t *&d_current_node, uint32_t *&d_last_node) { hipFree(d_inbound_vertices); hipFree(d_outbound_vertices); hipFree(d_inbound); hipFree(d_outbound); hipFree(d_weights); hipFree(d_current_node); hipFree(d_last_node); hipFree(d_red_array); } // function for calculating an optimal number of threads to the current vertices count uint32_t get_num_threads(uint32_t num_vertices) { if (num_vertices < 8196) { return 32; } else if (num_vertices < 16384) { return 128; } else if(num_vertices < 131072) { return 512; } else { return 1024; } } // function for calculating an optimal number of blocks to the current vertices and thread count uint32_t get_num_blocks(uint32_t num_vertices, uint32_t num_threads) { uint32_t blockfactor = (num_vertices - 1) / num_threads; uint32_t num_blocks = 1; while (blockfactor != 0) { blockfactor = blockfactor >> 1; num_blocks = num_blocks << 1; } return num_blocks; } void cuda1PrimAlgorithm(uint32_t num_vertices, uint32_t num_edges, uint2 *outbound_vertices, uint2 *inbound_vertices, uint32_t *outbound, uint32_t *inbound, uint32_t *weights) { { // declaration of device pointers 
uint2 * d_inbound_vertices = NULL, *d_outbound_vertices = NULL; uint32_t *d_outbound = NULL, *d_inbound = NULL, *d_weights = NULL; uint2 *d_red_array = NULL; // start node uint32_t current_node = 0, *d_current_node = 0, *d_last_node = NULL; // calculate an optimal distribution od blocks and threads uint32_t num_threads = get_num_threads(num_vertices); uint32_t num_blocks = get_num_blocks(num_vertices, num_threads); // allocate resources allocate_resources(num_vertices, num_edges,inbound_vertices, outbound_vertices, d_inbound_vertices, d_outbound_vertices, d_red_array, outbound, inbound, weights, current_node, d_outbound, d_inbound, d_weights, d_current_node, d_last_node, num_blocks); // calculate the size of the shared memory needed on the device. This value is given to the kernel as third parameter uint32_t shm_size = num_threads * sizeof(uint2) * SHM_FACTOR; // main loop where prim's algorithm is executed for (int i = 0; i < num_vertices - 1; i++) { update_mst << <num_blocks, num_threads >> > (d_outbound_vertices, d_inbound_vertices, d_outbound, d_weights, d_current_node); min_reduction1 << <num_blocks, num_threads, shm_size >> > (d_inbound, d_weights, d_red_array, num_vertices); min_reduction2 << <1, num_threads >> > (d_red_array, d_current_node, d_last_node, num_blocks); update_mst2 << <num_blocks, num_threads >> > (d_outbound, d_inbound, d_weights, d_current_node, d_last_node); } // copy results from device hipMemcpy(outbound, d_outbound, num_vertices * sizeof(uint32_t), hipMemcpyDeviceToHost); hipMemcpy(inbound, d_inbound, num_vertices * sizeof(uint32_t), hipMemcpyDeviceToHost); hipMemcpy(weights, d_weights, num_vertices * sizeof(uint32_t), hipMemcpyDeviceToHost); // free resources free_resources(d_inbound_vertices, d_outbound_vertices, d_red_array, d_outbound, d_inbound, d_weights, d_current_node, d_last_node); } }
7e08ad05943c093d3302c89dd07223d73fc395ec.cu
// // Second CUDA implemenation of Prim's Minimum Spanning Tree Algorithm // // // Please refer to the report for documentation on all the data structures used // here, as well as an outline of the implementation. #include "cuda1_prim.hpp" // // Kernel implementing the first phase of min reduction primitive // // local block minima are stored in a temporary array v_red // __global__ void min_reduction1(uint32_t *inbound, uint32_t *weights, uint2 *v_red, uint32_t num_vertices) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; extern __shared__ uint2 shm[]; // Initial assignment of shared memory -> of node is note reachable the weight is set to +inf shm[threadIdx.x].x = idx; shm[threadIdx.x + blockDim.x].x = idx + blockDim.x; shm[threadIdx.x].y = idx < num_vertices && inbound[idx] > num_vertices ? weights[idx] : UINT32_MAX; shm[threadIdx.x + blockDim.x].y = UINT32_MAX; // reduction loop for (int j = blockDim.x * SHM_FACTOR; j > 1; j /= 2) { for (int k = 0; k < SHM_FACTOR; k++) { if (shm[threadIdx.x].y > shm[threadIdx.x + j / 2].y) { shm[threadIdx.x].x = shm[threadIdx.x + j / 2].x; shm[threadIdx.x].y = shm[threadIdx.x + j / 2].y; } } __syncthreads(); } // store best local solution in temporary array if (threadIdx.x == 0) { v_red[blockIdx.x].x = shm[0].x; v_red[blockIdx.x].y = shm[0].y; } } // // Kernel implementing the second phase of min reduction primitive // // temporary reduction array v_red is reduced and best solution stored in v_red[0] // __global__ void min_reduction2(uint2 *v_red, uint32_t *current_node, uint32_t *last_node, uint32_t red1_blocks) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; // if only one block available -> best solution is already in v_red[0] if (red1_blocks == 1) { if (idx == 1) { *last_node = *current_node; *current_node = v_red[0].x; } return; } uint32_t half_size = red1_blocks / 2; // reduction loop for (int j = half_size; j > 1; j /= 2) { for (int i = 0; i < j; i += blockDim.x) { if (idx + i < j) { if (v_red[idx + i + 
j].y < v_red[idx + i].y) { v_red[idx + i].x = v_red[idx + i + j].x; v_red[idx + i].y = v_red[idx + i + j].y; } } } __syncthreads(); } // adjust current and last nodes to best result if (idx == 0) { *last_node = *current_node; if (v_red[1].y < v_red[0].y) { *current_node = v_red[1].x; } else { *current_node = v_red[0].x; } } } // // Kernel implementing the first weight update phase primitive // // Uses the compact adjacency list as read-only input, and writes to the MST // data structure. // // Each thread accesses only one "row" of the MST data structure, so there is // no need to synchronize anything. // // The position in the solution array is the corresponding inbound node of the new nodes reachable from current_node // __global__ void update_mst(uint2 *outbound_vertices, uint2 *inbound_vertices, uint32_t *outbound, uint32_t *weights, uint32_t *current_node) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; uint32_t start_index = outbound_vertices[*current_node].y; uint32_t end_index = start_index + outbound_vertices[*current_node].x; if (idx < end_index - start_index) { uint32_t edge_idx = idx + start_index; if (inbound_vertices[edge_idx].y < weights[inbound_vertices[edge_idx].x]) { weights[inbound_vertices[edge_idx].x] = inbound_vertices[edge_idx].y; outbound[inbound_vertices[edge_idx].x] = *current_node; } } } // // Kernel implementing the second weight update phase primitive // // Take the best fitting edge and store it store it at x-th position in the solution array. x is the last node found // __global__ void update_mst2(uint32_t *outbound, uint32_t *inbound, uint32_t *weights, uint32_t *current_node, uint32_t *last_node) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx == 0) { outbound[*last_node] = outbound[*current_node]; inbound[*last_node] = *current_node; weights[*last_node] = weights[*current_node]; weights[*current_node] = UINT32_MAX; } } // // Initialize the compact adjacency list representation (Wang et al.) 
// // Refer to the report for a detailed explanation of this data structure. // // The input graph is generated using our own graph generator, which can be // found in base/. // void cuda1Setup(const Graph& g, uint2 *&inbound_vertices, uint2 *&outbound_vertices) { uint32_t pos = 0; for (uint32_t v = 0; v < g.num_vertices(); ++v) { std::vector<EdgeTarget> neighbors; g.neighbors(v, neighbors); outbound_vertices[v].x = neighbors.size(); outbound_vertices[v].y = v == 0 ? 0 : v == 1 ? outbound_vertices[v - 1].x : outbound_vertices[v - 1].y + outbound_vertices[v - 1].x; for (auto nb = neighbors.begin(); nb < neighbors.end(); ++nb) { inbound_vertices[pos].x = nb->vertex_to; inbound_vertices[pos++].y = nb->weight; } } } // allocates all resources needed on the device void allocate_resources(uint32_t num_vertices, uint32_t num_edges, uint2 *& inbound_vertices, uint2 *& outbound_vertices, uint2 *& d_inbound_vertices, uint2 *& d_outbound_vertices, uint2 *&d_red_array, uint32_t *outbound, uint32_t *inbound, uint32_t *weights, uint32_t current_node, uint32_t *&d_outbound, uint32_t *&d_inbound, uint32_t *&d_weights, uint32_t *&d_current_node, uint32_t *&d_last_node, uint32_t num_blocks) { cudaMalloc(&d_inbound_vertices, num_edges * 2 * sizeof(uint2)); cudaMalloc(&d_outbound_vertices, num_vertices * sizeof(uint2)); cudaMalloc(&d_outbound, num_vertices * sizeof(uint32_t)); cudaMalloc(&d_inbound, num_vertices * sizeof(uint32_t)); cudaMalloc(&d_weights, num_vertices * sizeof(uint32_t)); cudaMalloc(&d_current_node, sizeof(uint32_t)); cudaMalloc(&d_last_node, sizeof(uint32_t)); cudaMalloc(&d_red_array, num_blocks * sizeof(uint2)); cudaMemcpy(d_inbound_vertices, inbound_vertices, num_edges * 2 * sizeof(uint2), cudaMemcpyHostToDevice); cudaMemcpy(d_outbound_vertices, outbound_vertices, num_vertices * sizeof(uint2), cudaMemcpyHostToDevice); cudaMemcpy(d_outbound, outbound, num_vertices * sizeof(uint32_t), cudaMemcpyHostToDevice); cudaMemcpy(d_inbound, inbound, num_vertices * 
sizeof(uint32_t), cudaMemcpyHostToDevice); cudaMemcpy(d_weights, weights, num_vertices * sizeof(uint32_t), cudaMemcpyHostToDevice); cudaMemcpy(d_current_node, &current_node, sizeof(uint32_t), cudaMemcpyHostToDevice); } // frees all allocated resources on the device void free_resources(uint2 *& d_inbound_vertices, uint2 *& d_outbound_vertices, uint2 *&d_red_array, uint32_t *&d_outbound, uint32_t *&d_inbound, uint32_t *&d_weights, uint32_t *&d_current_node, uint32_t *&d_last_node) { cudaFree(d_inbound_vertices); cudaFree(d_outbound_vertices); cudaFree(d_inbound); cudaFree(d_outbound); cudaFree(d_weights); cudaFree(d_current_node); cudaFree(d_last_node); cudaFree(d_red_array); } // function for calculating an optimal number of threads to the current vertices count uint32_t get_num_threads(uint32_t num_vertices) { if (num_vertices < 8196) { return 32; } else if (num_vertices < 16384) { return 128; } else if(num_vertices < 131072) { return 512; } else { return 1024; } } // function for calculating an optimal number of blocks to the current vertices and thread count uint32_t get_num_blocks(uint32_t num_vertices, uint32_t num_threads) { uint32_t blockfactor = (num_vertices - 1) / num_threads; uint32_t num_blocks = 1; while (blockfactor != 0) { blockfactor = blockfactor >> 1; num_blocks = num_blocks << 1; } return num_blocks; } void cuda1PrimAlgorithm(uint32_t num_vertices, uint32_t num_edges, uint2 *outbound_vertices, uint2 *inbound_vertices, uint32_t *outbound, uint32_t *inbound, uint32_t *weights) { { // declaration of device pointers uint2 * d_inbound_vertices = NULL, *d_outbound_vertices = NULL; uint32_t *d_outbound = NULL, *d_inbound = NULL, *d_weights = NULL; uint2 *d_red_array = NULL; // start node uint32_t current_node = 0, *d_current_node = 0, *d_last_node = NULL; // calculate an optimal distribution od blocks and threads uint32_t num_threads = get_num_threads(num_vertices); uint32_t num_blocks = get_num_blocks(num_vertices, num_threads); // allocate resources 
allocate_resources(num_vertices, num_edges,inbound_vertices, outbound_vertices, d_inbound_vertices, d_outbound_vertices, d_red_array, outbound, inbound, weights, current_node, d_outbound, d_inbound, d_weights, d_current_node, d_last_node, num_blocks); // calculate the size of the shared memory needed on the device. This value is given to the kernel as third parameter uint32_t shm_size = num_threads * sizeof(uint2) * SHM_FACTOR; // main loop where prim's algorithm is executed for (int i = 0; i < num_vertices - 1; i++) { update_mst << <num_blocks, num_threads >> > (d_outbound_vertices, d_inbound_vertices, d_outbound, d_weights, d_current_node); min_reduction1 << <num_blocks, num_threads, shm_size >> > (d_inbound, d_weights, d_red_array, num_vertices); min_reduction2 << <1, num_threads >> > (d_red_array, d_current_node, d_last_node, num_blocks); update_mst2 << <num_blocks, num_threads >> > (d_outbound, d_inbound, d_weights, d_current_node, d_last_node); } // copy results from device cudaMemcpy(outbound, d_outbound, num_vertices * sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemcpy(inbound, d_inbound, num_vertices * sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemcpy(weights, d_weights, num_vertices * sizeof(uint32_t), cudaMemcpyDeviceToHost); // free resources free_resources(d_inbound_vertices, d_outbound_vertices, d_red_array, d_outbound, d_inbound, d_weights, d_current_node, d_last_node); } }
decf7043006c3a10b9c96a45031084331bbcd3eb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdbool.h> #include <sys/time.h> double getTimeStamp() { struct timeval tv; gettimeofday( &tv, NULL ); return (double) tv.tv_usec/1000000 + tv.tv_sec; } void h_addmat(float*A, float*B, float*C, int nx, int ny) { int total = nx*ny; int count = 0; int i; for(i=0; i<total; i++) { C[count] = A[count] + B[count]; count++; } return; } __global__ void f_addmat( float*A, float*B, float*C, int nx, int ny) { int idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < (nx*ny)) { C[idx] = A[idx] + B[idx]; idx += blockDim.x * gridDim.x; } } int main( int argc, char *argv[] ) { if (argc != 3) { printf("Error: wrong number\n"); exit(0); } int nx = atoi ( argv[1] ); int ny = atoi (argv[2] ); if (nx <= 0 || ny <= 0) { printf("invalid inputs\n"); exit(0); } int noElems = nx*ny; int bytes = noElems * sizeof(float); int i,j, count; count = 0; float *h_A = (float *) malloc ( bytes ); float* h_B = (float *) malloc ( bytes ); float *h_hC = (float *) malloc ( bytes ); float *h_dC = (float *) malloc ( bytes ); for (i=0; i<nx; i++) for (j=0; j<ny; j++) { h_A[count] = (float)(i+j)/3.0; count++; } count = 0; for (i=0; i<nx; i++) for (j=0; j<ny; j++) { h_B[count]= (float)3.14*(i+j); count++; } float *d_A, *d_B, *d_C ; hipMalloc( (void **) &d_A, bytes); hipMalloc( (void **) &d_B, bytes); hipMalloc( (void **) &d_C, bytes); double timeStampA = getTimeStamp(); hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice); hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice); double timeStampB = getTimeStamp(); //dim3 block(32, 32); //dim3 grid((nx + block.x-1)/block.x, (ny+block.y-1)/block.y); hipLaunchKernelGGL(( f_addmat), dim3(512), dim3(512), 0, 0, d_A, d_B, d_C, nx, ny); hipDeviceSynchronize(); double timeStampC = getTimeStamp(); hipMemcpy(h_dC, d_C, bytes, hipMemcpyDeviceToHost ); double timeStampD = getTimeStamp(); hipFree( d_A ); 
hipFree( d_B); hipFree( d_C); hipDeviceReset(); h_addmat(h_A, h_B, h_hC, nx, ny); count = 0; bool s = true; for(i=0; i<noElems; i++) { if( h_hC[i] != h_dC[i] ) { s = false; printf("%d \n", i); break; } } if(s) { printf("total time is %.6f, CPU GPU transfer time is %.6f, kernel time is %.6f, GPU CPU transfer time is %.6f\n ", timeStampD-timeStampA, timeStampB - timeStampA, timeStampC- timeStampB, timeStampD - timeStampC); exit(0); } printf("finished"); return 0; }
decf7043006c3a10b9c96a45031084331bbcd3eb.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <stdbool.h> #include <sys/time.h> double getTimeStamp() { struct timeval tv; gettimeofday( &tv, NULL ); return (double) tv.tv_usec/1000000 + tv.tv_sec; } void h_addmat(float*A, float*B, float*C, int nx, int ny) { int total = nx*ny; int count = 0; int i; for(i=0; i<total; i++) { C[count] = A[count] + B[count]; count++; } return; } __global__ void f_addmat( float*A, float*B, float*C, int nx, int ny) { int idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < (nx*ny)) { C[idx] = A[idx] + B[idx]; idx += blockDim.x * gridDim.x; } } int main( int argc, char *argv[] ) { if (argc != 3) { printf("Error: wrong number\n"); exit(0); } int nx = atoi ( argv[1] ); int ny = atoi (argv[2] ); if (nx <= 0 || ny <= 0) { printf("invalid inputs\n"); exit(0); } int noElems = nx*ny; int bytes = noElems * sizeof(float); int i,j, count; count = 0; float *h_A = (float *) malloc ( bytes ); float* h_B = (float *) malloc ( bytes ); float *h_hC = (float *) malloc ( bytes ); float *h_dC = (float *) malloc ( bytes ); for (i=0; i<nx; i++) for (j=0; j<ny; j++) { h_A[count] = (float)(i+j)/3.0; count++; } count = 0; for (i=0; i<nx; i++) for (j=0; j<ny; j++) { h_B[count]= (float)3.14*(i+j); count++; } float *d_A, *d_B, *d_C ; cudaMalloc( (void **) &d_A, bytes); cudaMalloc( (void **) &d_B, bytes); cudaMalloc( (void **) &d_C, bytes); double timeStampA = getTimeStamp(); cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice); double timeStampB = getTimeStamp(); //dim3 block(32, 32); //dim3 grid((nx + block.x-1)/block.x, (ny+block.y-1)/block.y); f_addmat<<<512, 512>>>( d_A, d_B, d_C, nx, ny); cudaDeviceSynchronize(); double timeStampC = getTimeStamp(); cudaMemcpy(h_dC, d_C, bytes, cudaMemcpyDeviceToHost ); double timeStampD = getTimeStamp(); cudaFree( d_A ); cudaFree( d_B); cudaFree( d_C); cudaDeviceReset(); h_addmat(h_A, h_B, h_hC, nx, ny); count = 0; bool s = 
true; for(i=0; i<noElems; i++) { if( h_hC[i] != h_dC[i] ) { s = false; printf("%d \n", i); break; } } if(s) { printf("total time is %.6f, CPU GPU transfer time is %.6f, kernel time is %.6f, GPU CPU transfer time is %.6f\n ", timeStampD-timeStampA, timeStampB - timeStampA, timeStampC- timeStampB, timeStampD - timeStampC); exit(0); } printf("finished"); return 0; }
84436c23c093d3eb5df80f1af91ad2b298602210.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/random.h> #include "common.h" #include "mlp.h" namespace CharacterRecognition { using Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ /** * Example of use case (follow how you did it in stream compaction) */ /*void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); // TODO timer().endGpuTimer(); } */ #define blockSize 128 __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } __host__ __device__ float genRandom(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return (float)unitDistrib(rng); } __global__ void kernInitRandomWeights(int N, float* wtMat, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { float rand = genRandom(N, index); wtMat[index] = scale * rand; } } __global__ void kernInitZero(int N, float* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { data[index] = 0; } } __global__ void kernSumWeights(int iDim, int oDim, float* wtMat, float* idata, float* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= oDim) { return; } for (int idx = 0; idx < iDim; idx++) { int wtIdx = idx * oDim + index; odata[index] += wtMat[wtIdx] * idata[idx]; } } __global__ void kernActivationFxn(int N, float* idata, float* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } float x = idata[index]; float e = exp(-x); odata[index] = 1.0f / (1.0f + e); } __global__ void kernCalcErrors(int N, float* target, float* output, float* odata) 
{ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } odata[index] = target[index] - output[index]; } __global__ void kernEditWeightsji(int N, int iDim, float lambda, float* hidden, float* errors, float* outputSums, float* partialErr, float* wtMat) { // for hidden to output weights: // delta = lambda * value of hidden node * (target - output) * derivative of f(x) (where x is the sum before it went in f(x) or is just the output??) // derivative of f = f * (1-f) int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int i = index % iDim; int j = index / iDim; float x = outputSums[i]; float fx = 1.0f / (1.0f + exp(-x)); partialErr[i] = errors[i] * fx * (1 - fx); float deltaW = lambda * hidden[j] * partialErr[i]; wtMat[index] += deltaW; } __global__ void kernEditWeightskj(int N, int jDim, int iDim, float lambda, float* input, float* hiddenSums, float* partialErr, float* wji, float* wtMat) { // for hidden to output weights: // delta = lambda * value of input node * derivative of f(x) * // derivative of f = f * (1-f) int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int j = index % jDim; int k = index / jDim; float sumPropErrs = 0; for (int i = 0; i < iDim; i++) { sumPropErrs += partialErr[i] * wji[j + i * jDim]; } float x = hiddenSums[j]; float fx = 1.0f / (1.0f + exp(-x)); float deltaW = lambda * input[k] * sumPropErrs * fx * (1 - fx); wtMat[index] += deltaW; } void makeWeightMat(int n, float* data) { float* dev_data; hipMalloc((void**)&dev_data, n * sizeof(float)); kernInitRandomWeights << <n, blockSize >> > (n, dev_data, 30); hipMemcpy(data, dev_data, n * sizeof(float), hipMemcpyDeviceToHost); hipFree(dev_data); } // TODO: implement required elements for MLP sections 1 and 2 here float mlpTrain(int i, int j, int k, float* odata, float* idata, float* wkj, float* wji, float* target) { float *dev_input, *dev_hidden, *dev_output; float *dev_hiddenSums, *dev_outputSums; float *dev_wkj, 
*dev_wji; float *dev_target, *dev_errors, *dev_partialErr, *dev_tempwji; hipMalloc((void**)&dev_input, k * sizeof(float)); hipMalloc((void**)&dev_hidden, j * sizeof(float)); hipMalloc((void**)&dev_output, i * sizeof(float)); hipMemcpy(dev_input, idata, k * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&dev_hiddenSums, j * sizeof(float)); hipMalloc((void**)&dev_outputSums, i * sizeof(float)); hipMalloc((void**)&dev_wkj, k * j * sizeof(float)); hipMalloc((void**)&dev_wji, j * i * sizeof(float)); hipMemcpy(dev_wkj, wkj, k * j * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wji, wji, j * i * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&dev_target, i * sizeof(float)); hipMalloc((void**)&dev_errors, i * sizeof(float)); hipMalloc((void**)&dev_partialErr, i * sizeof(float)); hipMalloc((void**)&dev_tempwji, i * j * sizeof(float)); hipMemcpy(dev_target, target, i * sizeof(float), hipMemcpyHostToDevice); // initialize non input buffers to zeros kernInitZero << <j, blockSize >> > (j, dev_hidden); kernInitZero << <i, blockSize >> > (i, dev_output); // input -> hidden kernSumWeights << <j, blockSize >> > (k, j, dev_wkj, dev_input, dev_hiddenSums); kernActivationFxn << <j, blockSize >> > (j, dev_hiddenSums, dev_hidden); // hidden -> output kernSumWeights << <i, blockSize >> > (j, i, dev_wji, dev_hidden, dev_outputSums); kernActivationFxn << <i, blockSize >> > (i, dev_outputSums, dev_output); // calculate error, lambda kernCalcErrors << <i, blockSize >> > (i, dev_target, dev_output, dev_errors); float* errs = new float[i]; hipMemcpy(errs, dev_errors, i * sizeof(float), hipMemcpyDeviceToHost); float sumErr = 0; for (int e = 0; e < i; e++) { sumErr += (errs[e]*errs[e]); } sumErr /= 2.0f; float lambda = sumErr; // update weights hipMemcpy(dev_tempwji, dev_wji, j * i * sizeof(float), hipMemcpyDeviceToDevice); kernEditWeightsji << <j*i, blockSize >> > (j*i, i, lambda, dev_hidden, dev_errors, dev_output, dev_partialErr, dev_wji); kernEditWeightskj << <k*j, 
blockSize >> > (k*j, j, i, lambda, dev_input, dev_hidden, dev_partialErr, dev_tempwji, dev_wkj); hipMemcpy(odata, dev_output, i * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(wkj, dev_wkj, k * j * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(wji, dev_wji, j * i * sizeof(float), hipMemcpyDeviceToHost); hipFree(dev_input); hipFree(dev_hidden); hipFree(dev_output); hipFree(dev_hiddenSums); hipFree(dev_outputSums); hipFree(dev_wkj); hipFree(dev_wji); hipFree(dev_target); hipFree(dev_errors); hipFree(dev_partialErr); hipFree(dev_tempwji); return sumErr; } void mlpRun(int i, int j, int k, float* odata, float* idata, float* wkj, float* wji) { float *dev_input, *dev_hidden, *dev_output; float *dev_hiddenSums, *dev_outputSums; float *dev_wkj, *dev_wji; hipMalloc((void**)&dev_input, k * sizeof(float)); hipMalloc((void**)&dev_hidden, j * sizeof(float)); hipMalloc((void**)&dev_output, i * sizeof(float)); hipMemcpy(dev_input, idata, k * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&dev_hiddenSums, j * sizeof(float)); hipMalloc((void**)&dev_outputSums, i * sizeof(float)); hipMalloc((void**)&dev_wkj, k * j * sizeof(float)); hipMalloc((void**)&dev_wji, j * i * sizeof(float)); hipMemcpy(dev_wkj, wkj, k * j * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wji, wji, j * i * sizeof(float), hipMemcpyHostToDevice); // initialize non input buffers to zeros kernInitZero << <j, blockSize >> > (j, dev_hidden); kernInitZero << <i, blockSize >> > (i, dev_output); // input -> hidden kernSumWeights << <j, blockSize >> > (k, j, dev_wkj, dev_input, dev_hiddenSums); kernActivationFxn << <j, blockSize >> > (j, dev_hiddenSums, dev_hidden); // hidden -> output kernSumWeights << <i, blockSize >> > (j, i, dev_wji, dev_hidden, dev_outputSums); kernActivationFxn << <i, blockSize >> > (i, dev_outputSums, dev_output); hipMemcpy(odata, dev_output, i * sizeof(float), hipMemcpyDeviceToHost); hipFree(dev_input); hipFree(dev_hidden); hipFree(dev_output); hipFree(dev_hiddenSums); 
hipFree(dev_outputSums); hipFree(dev_wkj); hipFree(dev_wji); } }
84436c23c093d3eb5df80f1af91ad2b298602210.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/random.h> #include "common.h" #include "mlp.h" namespace CharacterRecognition { using Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ /** * Example of use case (follow how you did it in stream compaction) */ /*void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); // TODO timer().endGpuTimer(); } */ #define blockSize 128 __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } __host__ __device__ float genRandom(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return (float)unitDistrib(rng); } __global__ void kernInitRandomWeights(int N, float* wtMat, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { float rand = genRandom(N, index); wtMat[index] = scale * rand; } } __global__ void kernInitZero(int N, float* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { data[index] = 0; } } __global__ void kernSumWeights(int iDim, int oDim, float* wtMat, float* idata, float* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= oDim) { return; } for (int idx = 0; idx < iDim; idx++) { int wtIdx = idx * oDim + index; odata[index] += wtMat[wtIdx] * idata[idx]; } } __global__ void kernActivationFxn(int N, float* idata, float* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } float x = idata[index]; float e = exp(-x); odata[index] = 1.0f / (1.0f + e); } __global__ void kernCalcErrors(int N, float* target, float* output, float* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { 
return; } odata[index] = target[index] - output[index]; } __global__ void kernEditWeightsji(int N, int iDim, float lambda, float* hidden, float* errors, float* outputSums, float* partialErr, float* wtMat) { // for hidden to output weights: // delta = lambda * value of hidden node * (target - output) * derivative of f(x) (where x is the sum before it went in f(x) or is just the output??) // derivative of f = f * (1-f) int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int i = index % iDim; int j = index / iDim; float x = outputSums[i]; float fx = 1.0f / (1.0f + exp(-x)); partialErr[i] = errors[i] * fx * (1 - fx); float deltaW = lambda * hidden[j] * partialErr[i]; wtMat[index] += deltaW; } __global__ void kernEditWeightskj(int N, int jDim, int iDim, float lambda, float* input, float* hiddenSums, float* partialErr, float* wji, float* wtMat) { // for hidden to output weights: // delta = lambda * value of input node * derivative of f(x) * // derivative of f = f * (1-f) int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) { return; } int j = index % jDim; int k = index / jDim; float sumPropErrs = 0; for (int i = 0; i < iDim; i++) { sumPropErrs += partialErr[i] * wji[j + i * jDim]; } float x = hiddenSums[j]; float fx = 1.0f / (1.0f + exp(-x)); float deltaW = lambda * input[k] * sumPropErrs * fx * (1 - fx); wtMat[index] += deltaW; } void makeWeightMat(int n, float* data) { float* dev_data; cudaMalloc((void**)&dev_data, n * sizeof(float)); kernInitRandomWeights << <n, blockSize >> > (n, dev_data, 30); cudaMemcpy(data, dev_data, n * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_data); } // TODO: implement required elements for MLP sections 1 and 2 here float mlpTrain(int i, int j, int k, float* odata, float* idata, float* wkj, float* wji, float* target) { float *dev_input, *dev_hidden, *dev_output; float *dev_hiddenSums, *dev_outputSums; float *dev_wkj, *dev_wji; float *dev_target, *dev_errors, *dev_partialErr, 
*dev_tempwji; cudaMalloc((void**)&dev_input, k * sizeof(float)); cudaMalloc((void**)&dev_hidden, j * sizeof(float)); cudaMalloc((void**)&dev_output, i * sizeof(float)); cudaMemcpy(dev_input, idata, k * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_hiddenSums, j * sizeof(float)); cudaMalloc((void**)&dev_outputSums, i * sizeof(float)); cudaMalloc((void**)&dev_wkj, k * j * sizeof(float)); cudaMalloc((void**)&dev_wji, j * i * sizeof(float)); cudaMemcpy(dev_wkj, wkj, k * j * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_wji, wji, j * i * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_target, i * sizeof(float)); cudaMalloc((void**)&dev_errors, i * sizeof(float)); cudaMalloc((void**)&dev_partialErr, i * sizeof(float)); cudaMalloc((void**)&dev_tempwji, i * j * sizeof(float)); cudaMemcpy(dev_target, target, i * sizeof(float), cudaMemcpyHostToDevice); // initialize non input buffers to zeros kernInitZero << <j, blockSize >> > (j, dev_hidden); kernInitZero << <i, blockSize >> > (i, dev_output); // input -> hidden kernSumWeights << <j, blockSize >> > (k, j, dev_wkj, dev_input, dev_hiddenSums); kernActivationFxn << <j, blockSize >> > (j, dev_hiddenSums, dev_hidden); // hidden -> output kernSumWeights << <i, blockSize >> > (j, i, dev_wji, dev_hidden, dev_outputSums); kernActivationFxn << <i, blockSize >> > (i, dev_outputSums, dev_output); // calculate error, lambda kernCalcErrors << <i, blockSize >> > (i, dev_target, dev_output, dev_errors); float* errs = new float[i]; cudaMemcpy(errs, dev_errors, i * sizeof(float), cudaMemcpyDeviceToHost); float sumErr = 0; for (int e = 0; e < i; e++) { sumErr += (errs[e]*errs[e]); } sumErr /= 2.0f; float lambda = sumErr; // update weights cudaMemcpy(dev_tempwji, dev_wji, j * i * sizeof(float), cudaMemcpyDeviceToDevice); kernEditWeightsji << <j*i, blockSize >> > (j*i, i, lambda, dev_hidden, dev_errors, dev_output, dev_partialErr, dev_wji); kernEditWeightskj << <k*j, blockSize >> > (k*j, j, i, lambda, 
dev_input, dev_hidden, dev_partialErr, dev_tempwji, dev_wkj); cudaMemcpy(odata, dev_output, i * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(wkj, dev_wkj, k * j * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(wji, dev_wji, j * i * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_input); cudaFree(dev_hidden); cudaFree(dev_output); cudaFree(dev_hiddenSums); cudaFree(dev_outputSums); cudaFree(dev_wkj); cudaFree(dev_wji); cudaFree(dev_target); cudaFree(dev_errors); cudaFree(dev_partialErr); cudaFree(dev_tempwji); return sumErr; } void mlpRun(int i, int j, int k, float* odata, float* idata, float* wkj, float* wji) { float *dev_input, *dev_hidden, *dev_output; float *dev_hiddenSums, *dev_outputSums; float *dev_wkj, *dev_wji; cudaMalloc((void**)&dev_input, k * sizeof(float)); cudaMalloc((void**)&dev_hidden, j * sizeof(float)); cudaMalloc((void**)&dev_output, i * sizeof(float)); cudaMemcpy(dev_input, idata, k * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_hiddenSums, j * sizeof(float)); cudaMalloc((void**)&dev_outputSums, i * sizeof(float)); cudaMalloc((void**)&dev_wkj, k * j * sizeof(float)); cudaMalloc((void**)&dev_wji, j * i * sizeof(float)); cudaMemcpy(dev_wkj, wkj, k * j * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_wji, wji, j * i * sizeof(float), cudaMemcpyHostToDevice); // initialize non input buffers to zeros kernInitZero << <j, blockSize >> > (j, dev_hidden); kernInitZero << <i, blockSize >> > (i, dev_output); // input -> hidden kernSumWeights << <j, blockSize >> > (k, j, dev_wkj, dev_input, dev_hiddenSums); kernActivationFxn << <j, blockSize >> > (j, dev_hiddenSums, dev_hidden); // hidden -> output kernSumWeights << <i, blockSize >> > (j, i, dev_wji, dev_hidden, dev_outputSums); kernActivationFxn << <i, blockSize >> > (i, dev_outputSums, dev_output); cudaMemcpy(odata, dev_output, i * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_input); cudaFree(dev_hidden); cudaFree(dev_output); cudaFree(dev_hiddenSums); 
cudaFree(dev_outputSums); cudaFree(dev_wkj); cudaFree(dev_wji); } }
abae25dec899f04c4441c1688a7e74641fd8c2d7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/ensemble/randomforest.hpp> #include <utility> #include "benchmark.cuh" namespace ML { namespace Bench { namespace rf { struct RegParams { DatasetParams data; RegressionParams regression; RF_params rf; }; template <typename D> struct RFRegressorModel { }; template <> struct RFRegressorModel<float> { ML::RandomForestRegressorF model; }; template <> struct RFRegressorModel<double> { ML::RandomForestRegressorD model; }; template <typename D> class RFRegressor : public RegressionFixture<D> { public: RFRegressor(const std::string& name, const RegParams& p) : RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) { } protected: void runBenchmark(::benchmark::State& state) override { using MLCommon::Bench::CudaEventTimer; if (this->params.rowMajor) { state.SkipWithError("RFRegressor only supports col-major inputs"); } this->loopOnState(state, [this]() { auto* mPtr = &model.model; fit(*this->handle, mPtr, this->data.X, this->params.nrows, this->params.ncols, this->data.y, rfParams); CUDA_CHECK(hipStreamSynchronize(this->stream)); }); } private: RFRegressorModel<D> model; RF_params rfParams; }; template <typename D> std::vector<RegParams> getInputs() { struct DimInfo { int nrows, ncols, n_informative; }; struct std::vector<RegParams> out; RegParams p; p.data.rowMajor = false; p.regression = 
{.shuffle = true, // Better to shuffle when n_informative < ncols .effective_rank = -1, // dataset generation will be faster .bias = 4.5, .tail_strength = 0.5, // unused when effective_rank = -1 .noise = 1.0, .seed = 12345ULL}; p.rf = set_rf_params(10, /*max_depth */ (1 << 20), /* max_leaves */ 0.3, /* max_features */ 32, /* n_bins */ 3, /* min_samples_leaf */ 3, /* min_samples_split */ 0.0f, /* min_impurity_decrease */ true, /* bootstrap */ 500, /* n_trees */ 1.f, /* max_samples */ 1234ULL, /* seed */ ML::CRITERION::MSE, /* split_criterion */ 8, /* n_streams */ 128 /* max_batch_size */ ); std::vector<DimInfo> dim_info = {{500000, 500, 400}}; for (auto& di : dim_info) { // Let's run Bosch only for float type if (!std::is_same<D, float>::value && di.ncols == 968) continue; p.data.nrows = di.nrows; p.data.ncols = di.ncols; p.regression.n_informative = di.n_informative; p.rf.tree_params.max_features = 1.f; for (auto max_depth : std::vector<int>({7, 11, 15})) { p.rf.tree_params.max_depth = max_depth; out.push_back(p); } } return out; } ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression", getInputs<float>()); ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression", getInputs<double>()); } // namespace rf } // namespace Bench } // namespace ML
abae25dec899f04c4441c1688a7e74641fd8c2d7.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/ensemble/randomforest.hpp> #include <utility> #include "benchmark.cuh" namespace ML { namespace Bench { namespace rf { struct RegParams { DatasetParams data; RegressionParams regression; RF_params rf; }; template <typename D> struct RFRegressorModel { }; template <> struct RFRegressorModel<float> { ML::RandomForestRegressorF model; }; template <> struct RFRegressorModel<double> { ML::RandomForestRegressorD model; }; template <typename D> class RFRegressor : public RegressionFixture<D> { public: RFRegressor(const std::string& name, const RegParams& p) : RegressionFixture<D>(name, p.data, p.regression), rfParams(p.rf) { } protected: void runBenchmark(::benchmark::State& state) override { using MLCommon::Bench::CudaEventTimer; if (this->params.rowMajor) { state.SkipWithError("RFRegressor only supports col-major inputs"); } this->loopOnState(state, [this]() { auto* mPtr = &model.model; fit(*this->handle, mPtr, this->data.X, this->params.nrows, this->params.ncols, this->data.y, rfParams); CUDA_CHECK(cudaStreamSynchronize(this->stream)); }); } private: RFRegressorModel<D> model; RF_params rfParams; }; template <typename D> std::vector<RegParams> getInputs() { struct DimInfo { int nrows, ncols, n_informative; }; struct std::vector<RegParams> out; RegParams p; p.data.rowMajor = false; p.regression = {.shuffle = true, // Better to shuffle when n_informative < 
ncols .effective_rank = -1, // dataset generation will be faster .bias = 4.5, .tail_strength = 0.5, // unused when effective_rank = -1 .noise = 1.0, .seed = 12345ULL}; p.rf = set_rf_params(10, /*max_depth */ (1 << 20), /* max_leaves */ 0.3, /* max_features */ 32, /* n_bins */ 3, /* min_samples_leaf */ 3, /* min_samples_split */ 0.0f, /* min_impurity_decrease */ true, /* bootstrap */ 500, /* n_trees */ 1.f, /* max_samples */ 1234ULL, /* seed */ ML::CRITERION::MSE, /* split_criterion */ 8, /* n_streams */ 128 /* max_batch_size */ ); std::vector<DimInfo> dim_info = {{500000, 500, 400}}; for (auto& di : dim_info) { // Let's run Bosch only for float type if (!std::is_same<D, float>::value && di.ncols == 968) continue; p.data.nrows = di.nrows; p.data.ncols = di.ncols; p.regression.n_informative = di.n_informative; p.rf.tree_params.max_features = 1.f; for (auto max_depth : std::vector<int>({7, 11, 15})) { p.rf.tree_params.max_depth = max_depth; out.push_back(p); } } return out; } ML_BENCH_REGISTER(RegParams, RFRegressor<float>, "regression", getInputs<float>()); ML_BENCH_REGISTER(RegParams, RFRegressor<double>, "regression", getInputs<double>()); } // namespace rf } // namespace Bench } // namespace ML
0a1bc222a5fffed77223c62b5bb2e523cbf68cc5.hip
// !!! This is a file automatically generated by hipify!!! #include "Kernel.h" #include <helper_cuda.h> #include <hip/hip_runtime_api.h> static texture<float4, 2, hipReadModeElementType> texElnt; static hipArray *texElntArray = NULL; float *pTmpElntArray = NULL; float *hArrResult = NULL; float *dArrResult = NULL; extern "C" int InitTex( float *pData, int width, int height, int channel ) { int cn = UNIT*3; int fn = width*height*4; hArrResult = new float [cn]; checkCudaErrors( hipMalloc( (void**)&dArrResult, (cn)*sizeof(float) ) ); pTmpElntArray = new float [fn]; memset(pTmpElntArray, 0, fn*sizeof(float)); float *ptrData = pData; float *ptrElnt = pTmpElntArray; for (int i=0;i<width*height;i++) { for (int c=0;c<channel;c++, ptrData++) { (*(ptrElnt+c)) = (float) (*(ptrData)); } ptrElnt += 4; } hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<float4>(); checkCudaErrors( hipMallocArray(&texElntArray, &channelDesc, width, height) ); checkCudaErrors( hipMemcpy2DToArray(texElntArray, 0, 0, pTmpElntArray, width*sizeof(float4), width*sizeof(float4), height, hipMemcpyHostToDevice) ); texElnt.addressMode[0] = hipAddressModeClamp; texElnt.addressMode[1] = hipAddressModeClamp; texElnt.filterMode = hipFilterModeLinear; texElnt.normalized = false; checkCudaErrors( hipUnbindTexture(texElnt) ); checkCudaErrors( hipBindTextureToArray(texElnt, texElntArray, channelDesc) ); return 0; } static __global__ void kernel_texElnt(float* pdata, int w, int h, int c, float stride) { const int gx = blockIdx.x*blockDim.x + threadIdx.x; const int gy = blockIdx.y*blockDim.y + threadIdx.y; const int gw = gridDim.x * blockDim.x; const int gid = gy*gw + gx; float2 pnt; pnt.x = (gx)*(stride); pnt.y = 0.0625f; float4 result = tex2D( texElnt, pnt.x + 0.5, pnt.y + 0.5f); pdata[gid*3 + 0] = pnt.x; pdata[gid*3 + 1] = pnt.y; pdata[gid*3 + 2] = result.x; } extern "C" int RunKernel( int w, int h, int c, float nBase) { float stride = 1.0f / nBase; hipLaunchKernelGGL(( kernel_texElnt), dim3(1), 
dim3(UNIT) , 0, 0, dArrResult, w, h, c, stride); checkCudaErrors( hipMemcpy(hArrResult, dArrResult, UNIT*3*sizeof(float), hipMemcpyDeviceToHost) ); return 0; } extern "C" int UnInitTex() { delete hArrResult; delete pTmpElntArray; checkCudaErrors( hipFree(dArrResult) ); checkCudaErrors( hipFreeArray(texElntArray) ); return 0; }
0a1bc222a5fffed77223c62b5bb2e523cbf68cc5.cu
#include "Kernel.h" #include <helper_cuda.h> #include <cuda_runtime_api.h> static texture<float4, 2, cudaReadModeElementType> texElnt; static cudaArray *texElntArray = NULL; float *pTmpElntArray = NULL; float *hArrResult = NULL; float *dArrResult = NULL; extern "C" int InitTex( float *pData, int width, int height, int channel ) { int cn = UNIT*3; int fn = width*height*4; hArrResult = new float [cn]; checkCudaErrors( cudaMalloc( (void**)&dArrResult, (cn)*sizeof(float) ) ); pTmpElntArray = new float [fn]; memset(pTmpElntArray, 0, fn*sizeof(float)); float *ptrData = pData; float *ptrElnt = pTmpElntArray; for (int i=0;i<width*height;i++) { for (int c=0;c<channel;c++, ptrData++) { (*(ptrElnt+c)) = (float) (*(ptrData)); } ptrElnt += 4; } cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<float4>(); checkCudaErrors( cudaMallocArray(&texElntArray, &channelDesc, width, height) ); checkCudaErrors( cudaMemcpy2DToArray(texElntArray, 0, 0, pTmpElntArray, width*sizeof(float4), width*sizeof(float4), height, cudaMemcpyHostToDevice) ); texElnt.addressMode[0] = cudaAddressModeClamp; texElnt.addressMode[1] = cudaAddressModeClamp; texElnt.filterMode = cudaFilterModeLinear; texElnt.normalized = false; checkCudaErrors( cudaUnbindTexture(texElnt) ); checkCudaErrors( cudaBindTextureToArray(texElnt, texElntArray, channelDesc) ); return 0; } static __global__ void kernel_texElnt(float* pdata, int w, int h, int c, float stride) { const int gx = blockIdx.x*blockDim.x + threadIdx.x; const int gy = blockIdx.y*blockDim.y + threadIdx.y; const int gw = gridDim.x * blockDim.x; const int gid = gy*gw + gx; float2 pnt; pnt.x = (gx)*(stride); pnt.y = 0.0625f; float4 result = tex2D( texElnt, pnt.x + 0.5, pnt.y + 0.5f); pdata[gid*3 + 0] = pnt.x; pdata[gid*3 + 1] = pnt.y; pdata[gid*3 + 2] = result.x; } extern "C" int RunKernel( int w, int h, int c, float nBase) { float stride = 1.0f / nBase; kernel_texElnt<<< 1, UNIT >>> (dArrResult, w, h, c, stride); checkCudaErrors( 
cudaMemcpy(hArrResult, dArrResult, UNIT*3*sizeof(float), cudaMemcpyDeviceToHost) ); return 0; } extern "C" int UnInitTex() { delete hArrResult; delete pTmpElntArray; checkCudaErrors( cudaFree(dArrResult) ); checkCudaErrors( cudaFreeArray(texElntArray) ); return 0; }
5bbdf0b3dd03174e132aa5ccdb60d5ca84d17f5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_IndexedArray_reduce_next_fix_offsets_64(T* outoffsets, const C* starts, int64_t startslength, int64_t outindexlength, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < startslength) { outoffsets[thread_id] = starts[thread_id]; } outoffsets[startslength] = outindexlength; } }
5bbdf0b3dd03174e132aa5ccdb60d5ca84d17f5f.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_IndexedArray_reduce_next_fix_offsets_64(T* outoffsets, const C* starts, int64_t startslength, int64_t outindexlength, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < startslength) { outoffsets[thread_id] = starts[thread_id]; } outoffsets[startslength] = outindexlength; } }
c9aa14befa4a0e7f71514e1e6b3d84e79a44ff6d.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iomanip> #include <iostream> #include <memory> #include <stdexcept> #include <tuple> #include <vector> #include <cstdint> #include <sstream> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include "tensor.h" #include "gemm_problems.h" #ifndef PAD_KERNELS #define PAD_KERNELS 1 #endif /* Usage: The default precision is set based on the architecture and mode. By default, the program runs the benchmark in training mode. bin/gemm_bench To run inference mode, use the following command: bin/gemm_bench inference To change the precision for training/inference, use: bin/gemm_bench train <precision> bin/gemm_bench inference <precision> Supported precision types: For Maxwell GPUS: float for training and inference For Pascal GPUS: float, half for training float, half, int8 for inference */ template <typename T1, typename T2> int time_gemm(Tensor<T1> A, Tensor<T1> B, Tensor<T2> C, bool a_t, bool b_t, hipblasHandle_t cublas_handle) { #if (__CUDACC_VER_MAJOR__ >= 8) const int alpha = 1.f; const int beta = 1.f; #else const float alpha = 1.f / static_cast<float>(A.dims()[1]); const float beta = 1.f; #endif int m = C.dims()[0]; int k = a_t ? A.dims()[0] : A.dims()[1]; int n = C.dims()[1]; int numRepeats = ::max(::ceil(1e11 / (m * k * n)), 10.); hipblasStatus_t stat; #if (__CUDACC_VER_MAJOR__ >= 8) hipDataType A_type = HIP_R_32F; hipDataType B_type = HIP_R_32F; hipDataType C_type = HIP_R_32F; hipDataType compute_type = HIP_R_32F; if (std::is_same<T1, uint16_t>::value) { A_type = HIP_R_16F; B_type = HIP_R_16F; C_type = HIP_R_16F; } if (std::is_same<T1, uint8_t>::value) { A_type = HIP_R_8I; B_type = HIP_R_8I; C_type = HIP_R_32I; compute_type = HIP_R_32I; } #endif #if (__CUDACC_VER_MAJOR__ < 8) // Warm up stat = hipblasSgemm(cublas_handle, a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, b_t ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &alpha, A.begin(), A.dims()[0], B.begin(), B.dims()[0], &beta, C.begin(), C.dims()[0]); #else stat = hipblasGemmEx(cublas_handle, a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, b_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &alpha, A.begin(), A_type, A.dims()[0], B.begin(), B_type, B.dims()[0], &beta, C.begin(), C_type, C.dims()[0], compute_type, CUBLAS_GEMM_DFALT); #endif if (stat != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("sgemm failed"); } hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < numRepeats; ++i) { #if (__CUDACC_VER_MAJOR__ < 8) stat = hipblasSgemm(cublas_handle, a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, b_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &alpha, A.begin(), A.dims()[0], B.begin(), B.dims()[0], &beta, C.begin(), C.dims()[0]); #else stat = hipblasGemmEx(cublas_handle, a_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, b_t ? HIPBLAS_OP_T : HIPBLAS_OP_N, m, n, k, &alpha, A.begin(), A_type, A.dims()[0], B.begin(), B_type, B.dims()[0], &beta, C.begin(), C_type, C.dims()[0], compute_type, CUBLAS_GEMM_DFALT); #endif if (stat != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("sgemm failed"); } } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); return static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / numRepeats); } int main(int argc, char **argv) { hipFree(0); int inference = 0; if (argc > 1) { std::string inf = "inference"; inference = argv[1] == inf ? 
1 : 0; } #if (__CUDACC_VER_MAJOR__ >= 8) std::string precision; if (inference) precision = "int8"; else precision = "half"; #else std::string precision = "float"; #endif if (argc > 2) { precision = argv[2]; } hipblasHandle_t cublas_handle; hipblasStatus_t status = hipblasCreate(&cublas_handle); if (status != HIPBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS init failed" << std::endl; } hiprandGenerator_t curand_gen; hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL); if (inference) { std::cout << std::setw(45) << "Running inference benchmark " << std::endl; } else { std::cout << std::setw(45) << "Running training benchmark " << std::endl; } std::cout << std::setw(30) << "Times" << std::endl; std::cout << std::setfill('-') << std::setw(88) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " m n k a_t b_t precision time (usec) "; if (PAD_KERNELS && precision == "int8" && inference) std::cout << " pad_kerenels "; std::cout << std::endl; int pad_kernels_count = 0; for (const auto &problem : (inference ? inference_server_set : training_set)) { int m, n, k; bool a_t, b_t; std::tie(m, n, k, a_t, b_t) = problem; int time_ms; bool skip_kernel = false; bool need_padding = false; #if (__CUDACC_VER_MAJOR__ >= 8) int pad_m; pad_m = m; if (precision == "int8") { if (pad_m%4) { pad_kernels_count++; if (PAD_KERNELS) { pad_dim(pad_m); need_padding = true; } else { skip_kernel = true; } } } #endif std::cout << std::setw(7) << m; std::cout << std::setw(7) << n; std::cout << std::setw(7) << k; std::cout << std::setw(7) << a_t ? "t" : "n"; std::cout << std::setw(7) << b_t ? "t" : "n"; std::stringstream ss; ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference; #if (__CUDACC_VER_MAJOR__ >= 8) if (precision == "int8" & inference) { auto a = rand<uint8_t>({a_t ? k : pad_m, a_t ? pad_m : k}, curand_gen); auto b = rand<uint8_t>({b_t ? n : k, b_t ? 
k : n}, curand_gen); auto c = zeros<int>({pad_m, n}); std::cout << std::setw(14) << precision; if (!skip_kernel) time_ms = time_gemm<uint8_t, int>(a, b, c, a_t, b_t, cublas_handle); } else if (precision == "half") { auto a = rand<uint16_t>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<uint16_t>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<uint16_t>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<uint16_t, uint16_t>(a, b, c, a_t, b_t, cublas_handle); } else if (precision == "float") { auto a = rand<float>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<float>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<float>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<float, float>(a, b, c, a_t, b_t, cublas_handle); } else { throw std::runtime_error(ss.str()); } #else if (precision != "float") { throw std::runtime_error(ss.str()); } auto a = rand<float>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<float>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<float>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<float, float>(a, b, c, a_t, b_t, cublas_handle); #endif std::cout << std::setw(20) << std::setprecision(6); if (skip_kernel) { std::cout << "Not Supported"; } else { std::cout << time_ms; } if (PAD_KERNELS && precision == "int8" && inference) { std::cout << std::setw(10) << need_padding; } std::cout << std::endl; } if (precision == "int8") { std::cout << " Total kernels "; if (PAD_KERNELS) std::cout << "padded: " << pad_kernels_count << std::endl; else std::cout << "skipped: " << pad_kernels_count << std::endl; std::cout << " Total kernels: " << inference_server_set.size() << std::endl; } hipblasDestroy(cublas_handle); hiprandDestroyGenerator(curand_gen); return 0; }
c9aa14befa4a0e7f71514e1e6b3d84e79a44ff6d.cu
#include <chrono> #include <iomanip> #include <iostream> #include <memory> #include <stdexcept> #include <tuple> #include <vector> #include <cstdint> #include <sstream> #include <cuda.h> #include <cublas_v2.h> #include <curand.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include "tensor.h" #include "gemm_problems.h" #ifndef PAD_KERNELS #define PAD_KERNELS 1 #endif /* Usage: The default precision is set based on the architecture and mode. By default, the program runs the benchmark in training mode. bin/gemm_bench To run inference mode, use the following command: bin/gemm_bench inference To change the precision for training/inference, use: bin/gemm_bench train <precision> bin/gemm_bench inference <precision> Supported precision types: For Maxwell GPUS: float for training and inference For Pascal GPUS: float, half for training float, half, int8 for inference */ template <typename T1, typename T2> int time_gemm(Tensor<T1> A, Tensor<T1> B, Tensor<T2> C, bool a_t, bool b_t, cublasHandle_t cublas_handle) { #if (__CUDACC_VER_MAJOR__ >= 8) const int alpha = 1.f; const int beta = 1.f; #else const float alpha = 1.f / static_cast<float>(A.dims()[1]); const float beta = 1.f; #endif int m = C.dims()[0]; int k = a_t ? A.dims()[0] : A.dims()[1]; int n = C.dims()[1]; int numRepeats = std::max(std::ceil(1e11 / (m * k * n)), 10.); cublasStatus_t stat; #if (__CUDACC_VER_MAJOR__ >= 8) cudaDataType_t A_type = CUDA_R_32F; cudaDataType_t B_type = CUDA_R_32F; cudaDataType_t C_type = CUDA_R_32F; cudaDataType_t compute_type = CUDA_R_32F; if (std::is_same<T1, uint16_t>::value) { A_type = CUDA_R_16F; B_type = CUDA_R_16F; C_type = CUDA_R_16F; } if (std::is_same<T1, uint8_t>::value) { A_type = CUDA_R_8I; B_type = CUDA_R_8I; C_type = CUDA_R_32I; compute_type = CUDA_R_32I; } #endif #if (__CUDACC_VER_MAJOR__ < 8) // Warm up stat = cublasSgemm(cublas_handle, a_t ? CUBLAS_OP_T : CUBLAS_OP_N, b_t ? 
CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &alpha, A.begin(), A.dims()[0], B.begin(), B.dims()[0], &beta, C.begin(), C.dims()[0]); #else stat = cublasGemmEx(cublas_handle, a_t ? CUBLAS_OP_T : CUBLAS_OP_N, b_t ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &alpha, A.begin(), A_type, A.dims()[0], B.begin(), B_type, B.dims()[0], &beta, C.begin(), C_type, C.dims()[0], compute_type, CUBLAS_GEMM_DFALT); #endif if (stat != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("sgemm failed"); } cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < numRepeats; ++i) { #if (__CUDACC_VER_MAJOR__ < 8) stat = cublasSgemm(cublas_handle, a_t ? CUBLAS_OP_T : CUBLAS_OP_N, b_t ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &alpha, A.begin(), A.dims()[0], B.begin(), B.dims()[0], &beta, C.begin(), C.dims()[0]); #else stat = cublasGemmEx(cublas_handle, a_t ? CUBLAS_OP_T : CUBLAS_OP_N, b_t ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &alpha, A.begin(), A_type, A.dims()[0], B.begin(), B_type, B.dims()[0], &beta, C.begin(), C_type, C.dims()[0], compute_type, CUBLAS_GEMM_DFALT); #endif if (stat != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("sgemm failed"); } } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); return static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / numRepeats); } int main(int argc, char **argv) { cudaFree(0); int inference = 0; if (argc > 1) { std::string inf = "inference"; inference = argv[1] == inf ? 
1 : 0; } #if (__CUDACC_VER_MAJOR__ >= 8) std::string precision; if (inference) precision = "int8"; else precision = "half"; #else std::string precision = "float"; #endif if (argc > 2) { precision = argv[2]; } cublasHandle_t cublas_handle; cublasStatus_t status = cublasCreate(&cublas_handle); if (status != CUBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS init failed" << std::endl; } curandGenerator_t curand_gen; curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL); if (inference) { std::cout << std::setw(45) << "Running inference benchmark " << std::endl; } else { std::cout << std::setw(45) << "Running training benchmark " << std::endl; } std::cout << std::setw(30) << "Times" << std::endl; std::cout << std::setfill('-') << std::setw(88) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " m n k a_t b_t precision time (usec) "; if (PAD_KERNELS && precision == "int8" && inference) std::cout << " pad_kerenels "; std::cout << std::endl; int pad_kernels_count = 0; for (const auto &problem : (inference ? inference_server_set : training_set)) { int m, n, k; bool a_t, b_t; std::tie(m, n, k, a_t, b_t) = problem; int time_ms; bool skip_kernel = false; bool need_padding = false; #if (__CUDACC_VER_MAJOR__ >= 8) int pad_m; pad_m = m; if (precision == "int8") { if (pad_m%4) { pad_kernels_count++; if (PAD_KERNELS) { pad_dim(pad_m); need_padding = true; } else { skip_kernel = true; } } } #endif std::cout << std::setw(7) << m; std::cout << std::setw(7) << n; std::cout << std::setw(7) << k; std::cout << std::setw(7) << a_t ? "t" : "n"; std::cout << std::setw(7) << b_t ? "t" : "n"; std::stringstream ss; ss << "Unsupported precision requested. Precision: " << precision << " Inference: " << inference; #if (__CUDACC_VER_MAJOR__ >= 8) if (precision == "int8" & inference) { auto a = rand<uint8_t>({a_t ? k : pad_m, a_t ? pad_m : k}, curand_gen); auto b = rand<uint8_t>({b_t ? n : k, b_t ? 
k : n}, curand_gen); auto c = zeros<int>({pad_m, n}); std::cout << std::setw(14) << precision; if (!skip_kernel) time_ms = time_gemm<uint8_t, int>(a, b, c, a_t, b_t, cublas_handle); } else if (precision == "half") { auto a = rand<uint16_t>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<uint16_t>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<uint16_t>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<uint16_t, uint16_t>(a, b, c, a_t, b_t, cublas_handle); } else if (precision == "float") { auto a = rand<float>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<float>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<float>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<float, float>(a, b, c, a_t, b_t, cublas_handle); } else { throw std::runtime_error(ss.str()); } #else if (precision != "float") { throw std::runtime_error(ss.str()); } auto a = rand<float>({a_t ? k : m, a_t ? m : k}, curand_gen); auto b = rand<float>({b_t ? n : k, b_t ? k : n}, curand_gen); auto c = zeros<float>({m, n}); std::cout << std::setw(13) << precision; time_ms = time_gemm<float, float>(a, b, c, a_t, b_t, cublas_handle); #endif std::cout << std::setw(20) << std::setprecision(6); if (skip_kernel) { std::cout << "Not Supported"; } else { std::cout << time_ms; } if (PAD_KERNELS && precision == "int8" && inference) { std::cout << std::setw(10) << need_padding; } std::cout << std::endl; } if (precision == "int8") { std::cout << " Total kernels "; if (PAD_KERNELS) std::cout << "padded: " << pad_kernels_count << std::endl; else std::cout << "skipped: " << pad_kernels_count << std::endl; std::cout << " Total kernels: " << inference_server_set.size() << std::endl; } cublasDestroy(cublas_handle); curandDestroyGenerator(curand_gen); return 0; }
9066d2c92afaaaa502c1afb6afd82753a12d7651.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************************** Copyright (C) 2019 Hironori Fujimoto This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ***********************************************************************/ #include "sgm_gpu/hamming_cost.h" namespace sgm_gpu { //d_transform0, d_transform1, d_cost, rows, cols __global__ void HammingDistanceCostKernel ( const cost_t *d_transform0, const cost_t *d_transform1, uint8_t *d_cost, const int rows, const int cols ) { //const int Dmax= blockDim.x; // Dmax is CTA size const int y= blockIdx.x; // y is CTA Identifier const int THRid = threadIdx.x; // THRid is Thread Identifier __shared__ cost_t SharedMatch[2*MAX_DISPARITY]; __shared__ cost_t SharedBase [MAX_DISPARITY]; SharedMatch [MAX_DISPARITY+THRid] = d_transform1[y*cols+0]; // init position int n_iter = cols/MAX_DISPARITY; for (int ix=0; ix<n_iter; ix++) { const int x = ix*MAX_DISPARITY; SharedMatch [THRid] = SharedMatch [THRid + MAX_DISPARITY]; SharedMatch [THRid+MAX_DISPARITY] = d_transform1 [y*cols+x+THRid]; SharedBase [THRid] = d_transform0 [y*cols+x+THRid]; __syncthreads(); for (int i=0; i<MAX_DISPARITY; i++) { const cost_t base = SharedBase [i]; const cost_t match = SharedMatch[(MAX_DISPARITY-1-THRid)+1+i]; d_cost[(y*cols+x+i)*MAX_DISPARITY+THRid] = popcount( base ^ match ); } __syncthreads(); } // For images 
with cols not multiples of MAX_DISPARITY const int x = MAX_DISPARITY*(cols/MAX_DISPARITY); const int left = cols-x; if(left > 0) { SharedMatch [THRid] = SharedMatch [THRid + MAX_DISPARITY]; if(THRid < left) { SharedMatch [THRid+MAX_DISPARITY] = d_transform1 [y*cols+x+THRid]; SharedBase [THRid] = d_transform0 [y*cols+x+THRid]; } __syncthreads(); for (int i=0; i<left; i++) { const cost_t base = SharedBase [i]; const cost_t match = SharedMatch[(MAX_DISPARITY-1-THRid)+1+i]; d_cost[(y*cols+x+i)*MAX_DISPARITY+THRid] = popcount( base ^ match ); } __syncthreads(); } } } // namespace sgm_gpu
9066d2c92afaaaa502c1afb6afd82753a12d7651.cu
/*********************************************************************** Copyright (C) 2019 Hironori Fujimoto This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ***********************************************************************/ #include "sgm_gpu/hamming_cost.h" namespace sgm_gpu { //d_transform0, d_transform1, d_cost, rows, cols __global__ void HammingDistanceCostKernel ( const cost_t *d_transform0, const cost_t *d_transform1, uint8_t *d_cost, const int rows, const int cols ) { //const int Dmax= blockDim.x; // Dmax is CTA size const int y= blockIdx.x; // y is CTA Identifier const int THRid = threadIdx.x; // THRid is Thread Identifier __shared__ cost_t SharedMatch[2*MAX_DISPARITY]; __shared__ cost_t SharedBase [MAX_DISPARITY]; SharedMatch [MAX_DISPARITY+THRid] = d_transform1[y*cols+0]; // init position int n_iter = cols/MAX_DISPARITY; for (int ix=0; ix<n_iter; ix++) { const int x = ix*MAX_DISPARITY; SharedMatch [THRid] = SharedMatch [THRid + MAX_DISPARITY]; SharedMatch [THRid+MAX_DISPARITY] = d_transform1 [y*cols+x+THRid]; SharedBase [THRid] = d_transform0 [y*cols+x+THRid]; __syncthreads(); for (int i=0; i<MAX_DISPARITY; i++) { const cost_t base = SharedBase [i]; const cost_t match = SharedMatch[(MAX_DISPARITY-1-THRid)+1+i]; d_cost[(y*cols+x+i)*MAX_DISPARITY+THRid] = popcount( base ^ match ); } __syncthreads(); } // For images with cols not multiples of MAX_DISPARITY const int x = 
MAX_DISPARITY*(cols/MAX_DISPARITY); const int left = cols-x; if(left > 0) { SharedMatch [THRid] = SharedMatch [THRid + MAX_DISPARITY]; if(THRid < left) { SharedMatch [THRid+MAX_DISPARITY] = d_transform1 [y*cols+x+THRid]; SharedBase [THRid] = d_transform0 [y*cols+x+THRid]; } __syncthreads(); for (int i=0; i<left; i++) { const cost_t base = SharedBase [i]; const cost_t match = SharedMatch[(MAX_DISPARITY-1-THRid)+1+i]; d_cost[(y*cols+x+i)*MAX_DISPARITY+THRid] = popcount( base ^ match ); } __syncthreads(); } } } // namespace sgm_gpu
0e9ce7f0e01d5df35ad4acb6130bb2bfea985b9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <random> #include <cmath> #include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include "cuda_ptr.cuh" typedef double Dtype; constexpr Dtype density = 0.5; constexpr int N = 400000; // const int N = 1000000; constexpr int NUM_NEIGH = 60; constexpr int MAX_PAIRS = NUM_NEIGH * N; constexpr int LOOP = 100; #include "kernel_hip.cuh" Dtype L = 50.0; // Dtype L = 70.0; const Dtype dt = 0.001; cuda_ptr<float3> q_f3, p_f3; cuda_ptr<float4> q_f4, p_f4; cuda_ptr<double3> q_d3, p_d3; cuda_ptr<double4> q_d4, p_d4; cuda_ptr<int32_t> sorted_list, number_of_partners, pointer; cuda_ptr<int32_t> transposed_list; cuda_ptr<int32_t> sorted_list2d; int particle_number = 0; int number_of_pairs = 0; int i_particles[MAX_PAIRS]; int j_particles[MAX_PAIRS]; int pointer2[N]; const Dtype CUTOFF_LENGTH = 3.0; const Dtype SEARCH_LENGTH = 3.3; const auto CL2 = CUTOFF_LENGTH * CUTOFF_LENGTH; #ifdef EN_ACTION_REACTION const char* cache_file_name = ".cache_pair_half.dat"; #else const char* cache_file_name = ".cache_pair_all.dat"; #endif int THREAD_BLOCK = 128; template <typename Vec> void add_particle(const Dtype x, const Dtype y, const Dtype z, Vec* q) { static std::mt19937 mt(2); std::uniform_real_distribution<Dtype> ud(0.0, 0.1); q[particle_number].x = x + ud(mt); q[particle_number].y = y + ud(mt); q[particle_number].z = z + ud(mt); particle_number++; } template <typename Vec> void init(Vec* q, Vec* p) { const Dtype s = 1.0 / ::pow(density * 0.25, 1.0 / 3.0); const Dtype hs = s * 0.5; const int sx = static_cast<int>(L / s); const int sy = static_cast<int>(L / s); const int sz = static_cast<int>(L / s); for (int iz = 0; iz < sz; iz++) { for (int iy = 0; iy < sy; iy++) { for (int ix = 0; ix < sx; ix++) { const Dtype x = ix*s; const Dtype y = iy*s; const Dtype z = iz*s; add_particle(x ,y ,z, q); add_particle(x ,y+hs,z+hs, q); add_particle(x+hs ,y ,z+hs, 
q); add_particle(x+hs ,y+hs,z, q); } } } for (int i = 0; i < particle_number; i++) { p[i].x = 0.0; p[i].y = 0.0; p[i].z = 0.0; } if (particle_number > N) { fprintf(stderr, "particle_number %d exceeds maximum buffer size %d\n", particle_number, N); std::quick_exit(EXIT_FAILURE); } // std::mt19937 mt(123); // std::shuffle(q, q + particle_number, mt); } double myclock() { timeval t; gettimeofday(&t, nullptr); return t.tv_sec + t.tv_usec * 1e-6; } void register_pair(const int index1, const int index2) { int i, j; #ifdef EN_ACTION_REACTION if (index1 < index2) { i = index1; j = index2; } else { i = index2; j = index1; } #else i = index1; j = index2; #endif i_particles[number_of_pairs] = i; j_particles[number_of_pairs] = j; number_of_partners[i]++; number_of_pairs++; } template <typename Vec> void makepair(const Vec* q) { const auto SL2 = SEARCH_LENGTH * SEARCH_LENGTH; const auto pn = particle_number; for (int i = 0; i < pn; i++) { number_of_partners[i] = 0; } #ifdef EN_ACTION_REACTION for (int i = 0; i < particle_number - 1; i++) { for (int j = i + 1; j < particle_number; j++) { #else for (int i = 0; i < particle_number; i++) { for (int j = 0; j < particle_number; j++) { if (i == j) continue; #endif const auto dx = q[i].x - q[j].x; const auto dy = q[i].y - q[j].y; const auto dz = q[i].z - q[j].z; const auto r2 = dx * dx + dy * dy + dz * dz; if (r2 < SL2) { register_pair(i, j); } } } int pos = 0; pointer[0] = 0; for (int i = 0; i < pn - 1; i++) { pos += number_of_partners[i]; pointer[i + 1] = pos; } for (int i = 0; i < pn; i++) { pointer2[i] = 0; } const auto s = number_of_pairs; for (int k = 0; k < s; k++) { const auto i = i_particles[k]; const auto j = j_particles[k]; const auto index = pointer[i] + pointer2[i]; sorted_list[index] = j; pointer2[i] ++; } } void makepaircache() { FILE* fp = fopen(cache_file_name, "w"); fprintf(fp, "%d %d\n", particle_number, number_of_pairs); for (int i = 0; i < particle_number; i++) { fprintf(fp, "%d %d\n", number_of_partners[i], 
pointer[i]); } for (int i = 0; i < number_of_pairs; i++) { fprintf(fp, "%d\n", sorted_list[i]); } fclose(fp); } bool file_exist(const char* name) { struct stat buffer; return (stat(name, &buffer) == 0); } bool check_loadedpair() { for (int i = 0; i < particle_number; i++) { if (number_of_partners[i] < 0 || number_of_partners[i] >= particle_number) { fprintf(stderr, "number_of_partners[%d] = %d\n", i, number_of_partners[i]); return false; } if (pointer[i] < 0 || pointer[i] >= number_of_pairs + 1) { fprintf(stderr, "pointer[%d] = %d\n", i, pointer[i]); return false; } } for (int i = 0; i < number_of_pairs; i++) { if (sorted_list[i] < 0 || sorted_list[i] >= particle_number) { fprintf(stderr, "number_of_pairs[%d] = %d\n", i, sorted_list[i]); return false; } } return true; } bool loadpair() { if (!file_exist(cache_file_name)) return false; FILE* fp = fopen(cache_file_name, "r"); int ptcl_num_tmp = 0; fscanf(fp, "%d %d", &ptcl_num_tmp, &number_of_pairs); if (ptcl_num_tmp != particle_number) { fprintf(stderr, "Pairlist cache data may be broken.\n"); return false; } for (int i = 0; i < particle_number; i++) { fscanf(fp, "%d %d", &number_of_partners[i], &pointer[i]); } for (int i = 0; i < number_of_pairs; i++) { fscanf(fp, "%d", &sorted_list[i]); } if (!check_loadedpair()) return false; fclose(fp); fprintf(stderr, "%s is successfully loaded.\n", cache_file_name); return true; } void make_transposed_pairlist() { transposed_list.set_val(0); for (int i = 0; i < particle_number; i++) { const auto np = number_of_partners[i]; const auto kp = pointer[i]; for (int k = 0; k < np; k++) { const auto j = sorted_list[kp + k]; transposed_list[i + k * particle_number] = j; } } } void make_sorted_list2d() { sorted_list2d.set_val(0); for (int i = 0; i < particle_number; i++) { const auto np = number_of_partners[i]; const auto kp = pointer[i]; for (int k = 0; k < np ; k++) { const auto j = sorted_list[kp + k]; sorted_list2d[i * NUM_NEIGH + k] = j; } } } void random_shfl() { std::mt19937 
mt(10); const auto pn = particle_number; for (int i = 0; i < pn; i++) { const auto kp = pointer[i]; const auto np = number_of_partners[i]; std::shuffle(&sorted_list[kp], &sorted_list[kp + np], mt); } } void allocate() { q_f3.allocate(N); p_f3.allocate(N); q_d3.allocate(N); p_d3.allocate(N); q_f4.allocate(N); p_f4.allocate(N); q_d4.allocate(N); p_d4.allocate(N); sorted_list.allocate(MAX_PAIRS); transposed_list.allocate(MAX_PAIRS); sorted_list2d.allocate(MAX_PAIRS); number_of_partners.allocate(N); pointer.allocate(N); } void cleanup() { q_f3.deallocate(); p_f3.deallocate(); q_d3.deallocate(); p_d3.deallocate(); q_f4.deallocate(); p_f4.deallocate(); q_d4.deallocate(); p_d4.deallocate(); sorted_list.deallocate(); transposed_list.deallocate(); sorted_list2d.deallocate(); number_of_partners.deallocate(); pointer.deallocate(); } template <typename Vec1, typename Vec2> void copy_vec(Vec1* v1, Vec2* v2, const int ptcl_num) { for (int i = 0; i < ptcl_num; i++) { v1[i].x = v2[i].x; v1[i].y = v2[i].y; v1[i].z = v2[i].z; } } template <typename Vec> void copy_to_gpu(cuda_ptr<Vec>& q, cuda_ptr<Vec>& p) { q.host2dev(); p.host2dev(); sorted_list.host2dev(); transposed_list.host2dev(); sorted_list2d.host2dev(); number_of_partners.host2dev(); pointer.host2dev(); } template <typename Vec> void copy_to_host(cuda_ptr<Vec>& p) { p.dev2host(); } template <typename Vec, typename Dtype, typename ptr_func> void measure(ptr_func kernel, const char* name, cuda_ptr<Vec>& q, cuda_ptr<Vec>& p, const Dtype dt_, const Dtype CL2_, const int32_t* list, const int32_t* partner_pointer, const int32_t tot_thread) { const int block_num = (tot_thread - 1) / THREAD_BLOCK + 1; const auto st_all = myclock(); copy_to_gpu(q, p); const auto st_calc = myclock(); for (int i = 0; i < LOOP; i++) { hipLaunchKernelGGL(( kernel), dim3(block_num), dim3(THREAD_BLOCK), 0, 0, q, p, particle_number, dt_, CL2_, list, number_of_partners, partner_pointer); } checkCudaErrors(hipDeviceSynchronize()); const auto diff_calc = 
myclock() - st_calc; copy_to_host(p); const auto diff_all = myclock() - st_all; fprintf(stderr, "N=%d, %s %f [sec]\n", particle_number, name, diff_all); fprintf(stderr, "N=%d, %s %f [sec] (without Host<->Device)\n", particle_number, name, diff_calc); } template <typename Vec> void print_results(const Vec* p) { for (int i = 0; i < 5; i++) { fprintf(stdout, "%.10f %.10f %.10f\n", p[i].x, p[i].y, p[i].z); } for (int i = particle_number - 5; i < particle_number; i++) { fprintf(stdout, "%.10f %.10f %.10f\n", p[i].x, p[i].y, p[i].z); } } #define STR(s) #s #define MEASURE_FOR_ALLTYPES(fname, list, p_pointer, tot_thread) \ do { \ measure(fname<double3, double>, \ STR(fname ## _double3), \ q_d3, \ p_d3, \ static_cast<double>(dt), \ static_cast<double>(CL2), \ list, \ p_pointer, \ tot_thread); \ measure(fname<double4, double>, \ STR(fname ## _double4), \ q_d4, \ p_d4, \ static_cast<double>(dt), \ static_cast<double>(CL2), \ list, \ p_pointer, \ tot_thread); \ } while (false) int main(const int argc, const char* argv[]) { if (argc >= 2) THREAD_BLOCK = std::atoi(argv[1]); if (THREAD_BLOCK < 64 || THREAD_BLOCK > 1024) { std::cerr << "THREAD_BLOCK size is too large or small.\n"; std::exit(1); } allocate(); init(&q_d3[0], &p_d3[0]); copy_vec(&q_f3[0], &q_d3[0], particle_number); copy_vec(&p_f3[0], &p_d3[0], particle_number); copy_vec(&q_f4[0], &q_d3[0], particle_number); copy_vec(&p_f4[0], &p_d3[0], particle_number); copy_vec(&q_d4[0], &q_d3[0], particle_number); copy_vec(&p_d4[0], &p_d3[0], particle_number); if (!loadpair()) { fprintf(stderr, "Now make pairlist %s.\n", cache_file_name); number_of_pairs = 0; makepair(&q_d3[0]); random_shfl(); makepaircache(); } make_transposed_pairlist(); make_sorted_list2d(); #ifdef EN_TEST_GPU // MEASURE_FOR_ALLTYPES(force_kernel_plain, sorted_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_ifless, sorted_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt, sorted_list, pointer, particle_number); 
// MEASURE_FOR_ALLTYPES(force_kernel_memopt2, transposed_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt3, transposed_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt3_swpl, transposed_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl2, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl3, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_unrolling, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_unrolling2, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt3_coarse, transposed_list, nullptr, particle_number / 2); // MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll, sorted_list, pointer, particle_number * 32); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll2, sorted_list, pointer, particle_number * 32); print_results(&p_d3[0]); #elif defined EN_ACTION_REACTION MEASURE_FOR_ALLTYPES(force_kernel_plain_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_ifless_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt2_with_aar, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3_with_aar, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll_with_aar, sorted_list, pointer, particle_number * 32); // print_results(&p_d3[0]); #else MEASURE_FOR_ALLTYPES(force_kernel_plain, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_ifless, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt2, transposed_list, nullptr, 
particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3_swpl, transposed_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl2, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl3, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_unrolling, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_unrolling2, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3_coarse, transposed_list, nullptr, particle_number / 2); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll, sorted_list, pointer, particle_number * 32); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll2, sorted_list, pointer, particle_number * 32); #endif cleanup(); }
0e9ce7f0e01d5df35ad4acb6130bb2bfea985b9a.cu
#include <cstdio> #include <cstdlib> #include <random> #include <cmath> #include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include "cuda_ptr.cuh" typedef double Dtype; constexpr Dtype density = 0.5; constexpr int N = 400000; // const int N = 1000000; constexpr int NUM_NEIGH = 60; constexpr int MAX_PAIRS = NUM_NEIGH * N; constexpr int LOOP = 100; #include "kernel.cuh" Dtype L = 50.0; // Dtype L = 70.0; const Dtype dt = 0.001; cuda_ptr<float3> q_f3, p_f3; cuda_ptr<float4> q_f4, p_f4; cuda_ptr<double3> q_d3, p_d3; cuda_ptr<double4> q_d4, p_d4; cuda_ptr<int32_t> sorted_list, number_of_partners, pointer; cuda_ptr<int32_t> transposed_list; cuda_ptr<int32_t> sorted_list2d; int particle_number = 0; int number_of_pairs = 0; int i_particles[MAX_PAIRS]; int j_particles[MAX_PAIRS]; int pointer2[N]; const Dtype CUTOFF_LENGTH = 3.0; const Dtype SEARCH_LENGTH = 3.3; const auto CL2 = CUTOFF_LENGTH * CUTOFF_LENGTH; #ifdef EN_ACTION_REACTION const char* cache_file_name = ".cache_pair_half.dat"; #else const char* cache_file_name = ".cache_pair_all.dat"; #endif int THREAD_BLOCK = 128; template <typename Vec> void add_particle(const Dtype x, const Dtype y, const Dtype z, Vec* q) { static std::mt19937 mt(2); std::uniform_real_distribution<Dtype> ud(0.0, 0.1); q[particle_number].x = x + ud(mt); q[particle_number].y = y + ud(mt); q[particle_number].z = z + ud(mt); particle_number++; } template <typename Vec> void init(Vec* q, Vec* p) { const Dtype s = 1.0 / std::pow(density * 0.25, 1.0 / 3.0); const Dtype hs = s * 0.5; const int sx = static_cast<int>(L / s); const int sy = static_cast<int>(L / s); const int sz = static_cast<int>(L / s); for (int iz = 0; iz < sz; iz++) { for (int iy = 0; iy < sy; iy++) { for (int ix = 0; ix < sx; ix++) { const Dtype x = ix*s; const Dtype y = iy*s; const Dtype z = iz*s; add_particle(x ,y ,z, q); add_particle(x ,y+hs,z+hs, q); add_particle(x+hs ,y ,z+hs, q); add_particle(x+hs ,y+hs,z, q); } } } for (int i = 0; i < particle_number; i++) { 
p[i].x = 0.0; p[i].y = 0.0; p[i].z = 0.0; } if (particle_number > N) { fprintf(stderr, "particle_number %d exceeds maximum buffer size %d\n", particle_number, N); std::quick_exit(EXIT_FAILURE); } // std::mt19937 mt(123); // std::shuffle(q, q + particle_number, mt); } double myclock() { timeval t; gettimeofday(&t, nullptr); return t.tv_sec + t.tv_usec * 1e-6; } void register_pair(const int index1, const int index2) { int i, j; #ifdef EN_ACTION_REACTION if (index1 < index2) { i = index1; j = index2; } else { i = index2; j = index1; } #else i = index1; j = index2; #endif i_particles[number_of_pairs] = i; j_particles[number_of_pairs] = j; number_of_partners[i]++; number_of_pairs++; } template <typename Vec> void makepair(const Vec* q) { const auto SL2 = SEARCH_LENGTH * SEARCH_LENGTH; const auto pn = particle_number; for (int i = 0; i < pn; i++) { number_of_partners[i] = 0; } #ifdef EN_ACTION_REACTION for (int i = 0; i < particle_number - 1; i++) { for (int j = i + 1; j < particle_number; j++) { #else for (int i = 0; i < particle_number; i++) { for (int j = 0; j < particle_number; j++) { if (i == j) continue; #endif const auto dx = q[i].x - q[j].x; const auto dy = q[i].y - q[j].y; const auto dz = q[i].z - q[j].z; const auto r2 = dx * dx + dy * dy + dz * dz; if (r2 < SL2) { register_pair(i, j); } } } int pos = 0; pointer[0] = 0; for (int i = 0; i < pn - 1; i++) { pos += number_of_partners[i]; pointer[i + 1] = pos; } for (int i = 0; i < pn; i++) { pointer2[i] = 0; } const auto s = number_of_pairs; for (int k = 0; k < s; k++) { const auto i = i_particles[k]; const auto j = j_particles[k]; const auto index = pointer[i] + pointer2[i]; sorted_list[index] = j; pointer2[i] ++; } } void makepaircache() { FILE* fp = fopen(cache_file_name, "w"); fprintf(fp, "%d %d\n", particle_number, number_of_pairs); for (int i = 0; i < particle_number; i++) { fprintf(fp, "%d %d\n", number_of_partners[i], pointer[i]); } for (int i = 0; i < number_of_pairs; i++) { fprintf(fp, "%d\n", 
sorted_list[i]); } fclose(fp); } bool file_exist(const char* name) { struct stat buffer; return (stat(name, &buffer) == 0); } bool check_loadedpair() { for (int i = 0; i < particle_number; i++) { if (number_of_partners[i] < 0 || number_of_partners[i] >= particle_number) { fprintf(stderr, "number_of_partners[%d] = %d\n", i, number_of_partners[i]); return false; } if (pointer[i] < 0 || pointer[i] >= number_of_pairs + 1) { fprintf(stderr, "pointer[%d] = %d\n", i, pointer[i]); return false; } } for (int i = 0; i < number_of_pairs; i++) { if (sorted_list[i] < 0 || sorted_list[i] >= particle_number) { fprintf(stderr, "number_of_pairs[%d] = %d\n", i, sorted_list[i]); return false; } } return true; } bool loadpair() { if (!file_exist(cache_file_name)) return false; FILE* fp = fopen(cache_file_name, "r"); int ptcl_num_tmp = 0; fscanf(fp, "%d %d", &ptcl_num_tmp, &number_of_pairs); if (ptcl_num_tmp != particle_number) { fprintf(stderr, "Pairlist cache data may be broken.\n"); return false; } for (int i = 0; i < particle_number; i++) { fscanf(fp, "%d %d", &number_of_partners[i], &pointer[i]); } for (int i = 0; i < number_of_pairs; i++) { fscanf(fp, "%d", &sorted_list[i]); } if (!check_loadedpair()) return false; fclose(fp); fprintf(stderr, "%s is successfully loaded.\n", cache_file_name); return true; } void make_transposed_pairlist() { transposed_list.set_val(0); for (int i = 0; i < particle_number; i++) { const auto np = number_of_partners[i]; const auto kp = pointer[i]; for (int k = 0; k < np; k++) { const auto j = sorted_list[kp + k]; transposed_list[i + k * particle_number] = j; } } } void make_sorted_list2d() { sorted_list2d.set_val(0); for (int i = 0; i < particle_number; i++) { const auto np = number_of_partners[i]; const auto kp = pointer[i]; for (int k = 0; k < np ; k++) { const auto j = sorted_list[kp + k]; sorted_list2d[i * NUM_NEIGH + k] = j; } } } void random_shfl() { std::mt19937 mt(10); const auto pn = particle_number; for (int i = 0; i < pn; i++) { const auto 
kp = pointer[i]; const auto np = number_of_partners[i]; std::shuffle(&sorted_list[kp], &sorted_list[kp + np], mt); } } void allocate() { q_f3.allocate(N); p_f3.allocate(N); q_d3.allocate(N); p_d3.allocate(N); q_f4.allocate(N); p_f4.allocate(N); q_d4.allocate(N); p_d4.allocate(N); sorted_list.allocate(MAX_PAIRS); transposed_list.allocate(MAX_PAIRS); sorted_list2d.allocate(MAX_PAIRS); number_of_partners.allocate(N); pointer.allocate(N); } void cleanup() { q_f3.deallocate(); p_f3.deallocate(); q_d3.deallocate(); p_d3.deallocate(); q_f4.deallocate(); p_f4.deallocate(); q_d4.deallocate(); p_d4.deallocate(); sorted_list.deallocate(); transposed_list.deallocate(); sorted_list2d.deallocate(); number_of_partners.deallocate(); pointer.deallocate(); } template <typename Vec1, typename Vec2> void copy_vec(Vec1* v1, Vec2* v2, const int ptcl_num) { for (int i = 0; i < ptcl_num; i++) { v1[i].x = v2[i].x; v1[i].y = v2[i].y; v1[i].z = v2[i].z; } } template <typename Vec> void copy_to_gpu(cuda_ptr<Vec>& q, cuda_ptr<Vec>& p) { q.host2dev(); p.host2dev(); sorted_list.host2dev(); transposed_list.host2dev(); sorted_list2d.host2dev(); number_of_partners.host2dev(); pointer.host2dev(); } template <typename Vec> void copy_to_host(cuda_ptr<Vec>& p) { p.dev2host(); } template <typename Vec, typename Dtype, typename ptr_func> void measure(ptr_func kernel, const char* name, cuda_ptr<Vec>& q, cuda_ptr<Vec>& p, const Dtype dt_, const Dtype CL2_, const int32_t* list, const int32_t* partner_pointer, const int32_t tot_thread) { const int block_num = (tot_thread - 1) / THREAD_BLOCK + 1; const auto st_all = myclock(); copy_to_gpu(q, p); const auto st_calc = myclock(); for (int i = 0; i < LOOP; i++) { kernel<<<block_num, THREAD_BLOCK>>>(q, p, particle_number, dt_, CL2_, list, number_of_partners, partner_pointer); } checkCudaErrors(cudaDeviceSynchronize()); const auto diff_calc = myclock() - st_calc; copy_to_host(p); const auto diff_all = myclock() - st_all; fprintf(stderr, "N=%d, %s %f [sec]\n", 
particle_number, name, diff_all); fprintf(stderr, "N=%d, %s %f [sec] (without Host<->Device)\n", particle_number, name, diff_calc); } template <typename Vec> void print_results(const Vec* p) { for (int i = 0; i < 5; i++) { fprintf(stdout, "%.10f %.10f %.10f\n", p[i].x, p[i].y, p[i].z); } for (int i = particle_number - 5; i < particle_number; i++) { fprintf(stdout, "%.10f %.10f %.10f\n", p[i].x, p[i].y, p[i].z); } } #define STR(s) #s #define MEASURE_FOR_ALLTYPES(fname, list, p_pointer, tot_thread) \ do { \ measure(fname<double3, double>, \ STR(fname ## _double3), \ q_d3, \ p_d3, \ static_cast<double>(dt), \ static_cast<double>(CL2), \ list, \ p_pointer, \ tot_thread); \ measure(fname<double4, double>, \ STR(fname ## _double4), \ q_d4, \ p_d4, \ static_cast<double>(dt), \ static_cast<double>(CL2), \ list, \ p_pointer, \ tot_thread); \ } while (false) int main(const int argc, const char* argv[]) { if (argc >= 2) THREAD_BLOCK = std::atoi(argv[1]); if (THREAD_BLOCK < 64 || THREAD_BLOCK > 1024) { std::cerr << "THREAD_BLOCK size is too large or small.\n"; std::exit(1); } allocate(); init(&q_d3[0], &p_d3[0]); copy_vec(&q_f3[0], &q_d3[0], particle_number); copy_vec(&p_f3[0], &p_d3[0], particle_number); copy_vec(&q_f4[0], &q_d3[0], particle_number); copy_vec(&p_f4[0], &p_d3[0], particle_number); copy_vec(&q_d4[0], &q_d3[0], particle_number); copy_vec(&p_d4[0], &p_d3[0], particle_number); if (!loadpair()) { fprintf(stderr, "Now make pairlist %s.\n", cache_file_name); number_of_pairs = 0; makepair(&q_d3[0]); random_shfl(); makepaircache(); } make_transposed_pairlist(); make_sorted_list2d(); #ifdef EN_TEST_GPU // MEASURE_FOR_ALLTYPES(force_kernel_plain, sorted_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_ifless, sorted_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt, sorted_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt2, transposed_list, pointer, particle_number); // 
MEASURE_FOR_ALLTYPES(force_kernel_memopt3, transposed_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt3_swpl, transposed_list, pointer, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl2, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_swpl3, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_unrolling, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_unrolling2, transposed_list, nullptr, particle_number); // MEASURE_FOR_ALLTYPES(force_kernel_memopt3_coarse, transposed_list, nullptr, particle_number / 2); // MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll, sorted_list, pointer, particle_number * 32); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll2, sorted_list, pointer, particle_number * 32); print_results(&p_d3[0]); #elif defined EN_ACTION_REACTION MEASURE_FOR_ALLTYPES(force_kernel_plain_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_ifless_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt_with_aar, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt2_with_aar, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3_with_aar, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll_with_aar, sorted_list, pointer, particle_number * 32); // print_results(&p_d3[0]); #else MEASURE_FOR_ALLTYPES(force_kernel_plain, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_ifless, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt, sorted_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt2, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3, transposed_list, nullptr, particle_number); 
MEASURE_FOR_ALLTYPES(force_kernel_memopt3_swpl, transposed_list, pointer, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl2, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_swpl3, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_unrolling, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_unrolling2, transposed_list, nullptr, particle_number); MEASURE_FOR_ALLTYPES(force_kernel_memopt3_coarse, transposed_list, nullptr, particle_number / 2); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll, sorted_list, pointer, particle_number * 32); MEASURE_FOR_ALLTYPES(force_kernel_warp_unroll2, sorted_list, pointer, particle_number * 32); #endif cleanup(); }
b2ae55030ed8f710a3cceb30599e676fef53c201.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <algorithm> #include <string> #include "poisson2d.hpp" #include "timer.hpp" #define BLOCK_SIZE 256 #define GRID_SIZE 256 /** Computes y = A*x for a sparse matrix A in CSR format and vector x,y */ __global__ void csr_Ax(const size_t N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { const size_t stride = gridDim.x * blockDim.x; for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < N; i += stride) { double tmp = 0.0; for (int j = csr_rowoffsets[i]; j < csr_rowoffsets[i+1]; ++j) tmp += csr_values[j] * x[csr_colindices[j]]; y[i] = tmp; } } __global__ void xADDay(const size_t N, double *x, double *y, double *z, const double alpha) { const size_t stride = blockDim.x * gridDim.x; for(size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) z[i] = x[i] + alpha * y[i]; } __global__ void xDOTy(const size_t N, double* x, double* y, double* z) { size_t tid = threadIdx.x + blockDim.x* blockIdx.x; size_t stride = blockDim.x* gridDim.x; __shared__ double cache[BLOCK_SIZE]; double tid_sum = 0.0; for (; tid < N; tid += stride) { tid_sum += x[tid] * y[tid]; } tid = threadIdx.x; cache[tid] = tid_sum; __syncthreads(); for (size_t i = blockDim.x/2; i != 0; i /=2) { __syncthreads(); if (tid < i) //lower half does smth, rest idles cache[tid] += cache[tid + i]; //lower looks up by stride and sums up } if(tid == 0) // cache[0] now contains block_sum { atomicAdd(z, cache[0]); } } /** Implementation of the conjugate gradient algorithm. * * The control flow is handled by the CPU. * Only the individual operations (vector updates, dot products, sparse matrix-vector product) are transferred to CUDA kernels. * * The temporary arrays p, r, and Ap need to be allocated on the GPU for use with CUDA. * Modify as you see fit. 
*/ void conjugate_gradient(const size_t N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *h_rhs, double *h_solution, const double conv_factor) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // clear solution vector (it may contain garbage values): std::fill(h_solution, h_solution + N, 0.0); // initialize work vectors: double* h_pAp = (double*)malloc(sizeof(double)); double* h_r2 = (double*)malloc(sizeof(double)); double* h_r22 = (double*)malloc(sizeof(double)); double* zero = (double*)malloc(sizeof(double)); *zero = 0.00; *h_pAp = 0.00; *h_r2 = 0.00; *h_r22 = 0.00; double* x; double* p; double* r; double* Ap; double* pAp; double* r2; // arrays const size_t arr_size = N*sizeof(double); hipMalloc(&x, arr_size); hipMalloc(&p, arr_size); hipMalloc(&r, arr_size); hipMalloc(&Ap, arr_size); // scalars hipMalloc(&pAp, sizeof(double)); hipMalloc(&r2, sizeof(double)); // line 2: initialize r and p: //std::copy(h_rhs, h_rhs+N, h_p); //std::copy(h_rhs, h_rhs+N, h_r); hipMemcpy(x, h_solution, arr_size, hipMemcpyHostToDevice); hipMemcpy(r, h_rhs, arr_size, hipMemcpyHostToDevice); hipMemcpy(Ap, h_rhs, arr_size, hipMemcpyHostToDevice); hipMemcpy(p, h_rhs, arr_size, hipMemcpyHostToDevice); double alpha, beta; int iters = 0; //while (1) { while (iters < 10000) { // will end with iter == 10'000 or earlier hipMemcpy(r2, zero, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(pAp, zero, sizeof(double), hipMemcpyHostToDevice); // 4: Ap = A * p hipLaunchKernelGGL(( csr_Ax), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, csr_rowoffsets, csr_colindices, csr_values, p, Ap); // 5: pAp = <p,Ap> hipLaunchKernelGGL(( xDOTy), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, p, Ap, pAp); // r2 = <r,r> hipLaunchKernelGGL(( xDOTy), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, r, r, r2); hipDeviceSynchronize(); hipMemcpy(h_pAp, pAp, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(h_r2, r2, sizeof(double), hipMemcpyDeviceToHost); 
hipDeviceSynchronize(); // 6: alpha = <r,r>/<p,Ap> alpha = (*h_r2) / (*h_pAp); // 7: x = x_i+1 = ... hipLaunchKernelGGL(( xADDay), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, x, p, x, alpha); // 8: r = r_i+1 = ... hipLaunchKernelGGL(( xADDay), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, r, Ap, r, -alpha); // 9: r2 = <r,r> hipLaunchKernelGGL(( xDOTy), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, r, r, r2); hipDeviceSynchronize(); hipMemcpy(h_r22, r2, sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // // 10: check if (iters < 10 or iters > 10000 - 10) std::cout << "r2[" << iters << "] = " << *h_r2 << " vs " << conv_factor << std::endl; if (*h_r22 < conv_factor) { break; } // beta = beta_i = ... beta = (*h_r22) / (*h_r2); // 10: check // if (iters < 10 or iters > 10000 - 10) // std::cout << "r2[" << iters << "] = " << beta << " vs " << conv_factor << std::endl; // if (beta < conv_factor or beta > 10) { // break; // } // 12: p = p_i+1 = ... hipLaunchKernelGGL(( xADDay), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, r, p, p, beta); hipDeviceSynchronize(); ++iters; } if (iters >= 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations with r^2 = " << *h_r2 << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations with r^2 = " << *h_r2 << std::endl; hipMemcpy(h_solution, x, arr_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(x); hipFree(p); hipFree(r); hipFree(Ap); hipFree(pAp); hipFree(r2); free(h_pAp); free(h_r2); free(h_r22); } /** Solve a system with `points_per_direction * points_per_direction` unknowns */ void solve_system(size_t points_per_direction) { size_t N = points_per_direction * points_per_direction; // number of unknows to solve for std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl; // // Allocate CSR arrays. // // Note: Usually one does not know the number of nonzeros in the system matrix a-priori. 
// For this exercise, however, we know that there are at most 5 nonzeros per row in the system matrix, so we can allocate accordingly. // const size_t size_row = sizeof(int) * (N+1); const size_t size_col = sizeof(int) * 5 * N; const size_t size_val = sizeof(double) * 5 * N; int *h_csr_rowoffsets = (int*)malloc(size_row); int *h_csr_colindices = (int*)malloc(size_col); double *h_csr_values = (double*)malloc(size_val); int* csr_rowoffsets; int* csr_colindices; double* csr_values; hipMalloc(&csr_rowoffsets, size_row); hipMalloc(&csr_colindices, size_col); hipMalloc(&csr_values, size_val); // // fill CSR matrix with values // generate_fdm_laplace(points_per_direction, h_csr_rowoffsets, h_csr_colindices, h_csr_values); hipMemcpy(csr_rowoffsets, h_csr_rowoffsets, size_row, hipMemcpyHostToDevice); hipMemcpy(csr_colindices, h_csr_colindices, size_col, hipMemcpyHostToDevice); hipMemcpy(csr_values, h_csr_values, size_val, hipMemcpyHostToDevice); // // Allocate solution vector and right hand side: // double *solution = (double*)malloc(sizeof(double) * N); double *rhs = (double*)malloc(sizeof(double) * N); std::fill(rhs, rhs + N, 1); // // Call Conjugate Gradient implementation (CPU arrays passed here; modify to use GPU arrays) // CSR Matrix is passed as GPU arrays already. // rhs and solution are CPU arrays. // This isn't a nice setup obviously...but it's little more than a 1 file "script", so I think that's fine for now. 
// double conv_factor = 1e-6; //1e-6 conjugate_gradient(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution, conv_factor); // // Check for convergence: // double residual_norm = relative_residual(N, h_csr_rowoffsets, h_csr_colindices, h_csr_values, rhs, solution); std::string check = "OK"; if (residual_norm > conv_factor) check = "FAIL"; std::cout << "Relative residual norm: " << residual_norm << " (should be smaller than 1e-6): " << check << std::endl; hipFree(csr_rowoffsets); hipFree(csr_colindices); hipFree(csr_values); free(solution); free(rhs); free(h_csr_rowoffsets); free(h_csr_colindices); free(h_csr_values); } int main() { solve_system(10); // solves a system with 100*100 unknowns return EXIT_SUCCESS; }
b2ae55030ed8f710a3cceb30599e676fef53c201.cu
#include <stdio.h> #include <iostream> #include <algorithm> #include <string> #include "poisson2d.hpp" #include "timer.hpp" #define BLOCK_SIZE 256 #define GRID_SIZE 256 /** Computes y = A*x for a sparse matrix A in CSR format and vector x,y */ __global__ void csr_Ax(const size_t N, int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *x, double *y) { const size_t stride = gridDim.x * blockDim.x; for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < N; i += stride) { double tmp = 0.0; for (int j = csr_rowoffsets[i]; j < csr_rowoffsets[i+1]; ++j) tmp += csr_values[j] * x[csr_colindices[j]]; y[i] = tmp; } } __global__ void xADDay(const size_t N, double *x, double *y, double *z, const double alpha) { const size_t stride = blockDim.x * gridDim.x; for(size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) z[i] = x[i] + alpha * y[i]; } __global__ void xDOTy(const size_t N, double* x, double* y, double* z) { size_t tid = threadIdx.x + blockDim.x* blockIdx.x; size_t stride = blockDim.x* gridDim.x; __shared__ double cache[BLOCK_SIZE]; double tid_sum = 0.0; for (; tid < N; tid += stride) { tid_sum += x[tid] * y[tid]; } tid = threadIdx.x; cache[tid] = tid_sum; __syncthreads(); for (size_t i = blockDim.x/2; i != 0; i /=2) { __syncthreads(); if (tid < i) //lower half does smth, rest idles cache[tid] += cache[tid + i]; //lower looks up by stride and sums up } if(tid == 0) // cache[0] now contains block_sum { atomicAdd(z, cache[0]); } } /** Implementation of the conjugate gradient algorithm. * * The control flow is handled by the CPU. * Only the individual operations (vector updates, dot products, sparse matrix-vector product) are transferred to CUDA kernels. * * The temporary arrays p, r, and Ap need to be allocated on the GPU for use with CUDA. * Modify as you see fit. 
*/ void conjugate_gradient(const size_t N, // number of unknows int *csr_rowoffsets, int *csr_colindices, double *csr_values, double *h_rhs, double *h_solution, const double conv_factor) //, double *init_guess) // feel free to add a nonzero initial guess as needed { // clear solution vector (it may contain garbage values): std::fill(h_solution, h_solution + N, 0.0); // initialize work vectors: double* h_pAp = (double*)malloc(sizeof(double)); double* h_r2 = (double*)malloc(sizeof(double)); double* h_r22 = (double*)malloc(sizeof(double)); double* zero = (double*)malloc(sizeof(double)); *zero = 0.00; *h_pAp = 0.00; *h_r2 = 0.00; *h_r22 = 0.00; double* x; double* p; double* r; double* Ap; double* pAp; double* r2; // arrays const size_t arr_size = N*sizeof(double); cudaMalloc(&x, arr_size); cudaMalloc(&p, arr_size); cudaMalloc(&r, arr_size); cudaMalloc(&Ap, arr_size); // scalars cudaMalloc(&pAp, sizeof(double)); cudaMalloc(&r2, sizeof(double)); // line 2: initialize r and p: //std::copy(h_rhs, h_rhs+N, h_p); //std::copy(h_rhs, h_rhs+N, h_r); cudaMemcpy(x, h_solution, arr_size, cudaMemcpyHostToDevice); cudaMemcpy(r, h_rhs, arr_size, cudaMemcpyHostToDevice); cudaMemcpy(Ap, h_rhs, arr_size, cudaMemcpyHostToDevice); cudaMemcpy(p, h_rhs, arr_size, cudaMemcpyHostToDevice); double alpha, beta; int iters = 0; //while (1) { while (iters < 10000) { // will end with iter == 10'000 or earlier cudaMemcpy(r2, zero, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(pAp, zero, sizeof(double), cudaMemcpyHostToDevice); // 4: Ap = A * p csr_Ax<<<GRID_SIZE, BLOCK_SIZE>>>(N, csr_rowoffsets, csr_colindices, csr_values, p, Ap); // 5: pAp = <p,Ap> xDOTy<<<GRID_SIZE, BLOCK_SIZE>>>(N, p, Ap, pAp); // r2 = <r,r> xDOTy<<<GRID_SIZE, BLOCK_SIZE>>>(N, r, r, r2); cudaDeviceSynchronize(); cudaMemcpy(h_pAp, pAp, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(h_r2, r2, sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // 6: alpha = <r,r>/<p,Ap> alpha = (*h_r2) / (*h_pAp); // 7: x = 
x_i+1 = ... xADDay<<<GRID_SIZE, BLOCK_SIZE>>>(N, x, p, x, alpha); // 8: r = r_i+1 = ... xADDay<<<GRID_SIZE, BLOCK_SIZE>>>(N, r, Ap, r, -alpha); // 9: r2 = <r,r> xDOTy<<<GRID_SIZE, BLOCK_SIZE>>>(N, r, r, r2); cudaDeviceSynchronize(); cudaMemcpy(h_r22, r2, sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // // 10: check if (iters < 10 or iters > 10000 - 10) std::cout << "r2[" << iters << "] = " << *h_r2 << " vs " << conv_factor << std::endl; if (*h_r22 < conv_factor) { break; } // beta = beta_i = ... beta = (*h_r22) / (*h_r2); // 10: check // if (iters < 10 or iters > 10000 - 10) // std::cout << "r2[" << iters << "] = " << beta << " vs " << conv_factor << std::endl; // if (beta < conv_factor or beta > 10) { // break; // } // 12: p = p_i+1 = ... xADDay<<<GRID_SIZE, BLOCK_SIZE>>>(N, r, p, p, beta); cudaDeviceSynchronize(); ++iters; } if (iters >= 10000) std::cout << "Conjugate Gradient did NOT converge within 10000 iterations with r^2 = " << *h_r2 << std::endl; else std::cout << "Conjugate Gradient converged in " << iters << " iterations with r^2 = " << *h_r2 << std::endl; cudaMemcpy(h_solution, x, arr_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaFree(x); cudaFree(p); cudaFree(r); cudaFree(Ap); cudaFree(pAp); cudaFree(r2); free(h_pAp); free(h_r2); free(h_r22); } /** Solve a system with `points_per_direction * points_per_direction` unknowns */ void solve_system(size_t points_per_direction) { size_t N = points_per_direction * points_per_direction; // number of unknows to solve for std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl; // // Allocate CSR arrays. // // Note: Usually one does not know the number of nonzeros in the system matrix a-priori. // For this exercise, however, we know that there are at most 5 nonzeros per row in the system matrix, so we can allocate accordingly. 
// const size_t size_row = sizeof(int) * (N+1); const size_t size_col = sizeof(int) * 5 * N; const size_t size_val = sizeof(double) * 5 * N; int *h_csr_rowoffsets = (int*)malloc(size_row); int *h_csr_colindices = (int*)malloc(size_col); double *h_csr_values = (double*)malloc(size_val); int* csr_rowoffsets; int* csr_colindices; double* csr_values; cudaMalloc(&csr_rowoffsets, size_row); cudaMalloc(&csr_colindices, size_col); cudaMalloc(&csr_values, size_val); // // fill CSR matrix with values // generate_fdm_laplace(points_per_direction, h_csr_rowoffsets, h_csr_colindices, h_csr_values); cudaMemcpy(csr_rowoffsets, h_csr_rowoffsets, size_row, cudaMemcpyHostToDevice); cudaMemcpy(csr_colindices, h_csr_colindices, size_col, cudaMemcpyHostToDevice); cudaMemcpy(csr_values, h_csr_values, size_val, cudaMemcpyHostToDevice); // // Allocate solution vector and right hand side: // double *solution = (double*)malloc(sizeof(double) * N); double *rhs = (double*)malloc(sizeof(double) * N); std::fill(rhs, rhs + N, 1); // // Call Conjugate Gradient implementation (CPU arrays passed here; modify to use GPU arrays) // CSR Matrix is passed as GPU arrays already. // rhs and solution are CPU arrays. // This isn't a nice setup obviously...but it's little more than a 1 file "script", so I think that's fine for now. 
// double conv_factor = 1e-6; //1e-6 conjugate_gradient(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution, conv_factor); // // Check for convergence: // double residual_norm = relative_residual(N, h_csr_rowoffsets, h_csr_colindices, h_csr_values, rhs, solution); std::string check = "OK"; if (residual_norm > conv_factor) check = "FAIL"; std::cout << "Relative residual norm: " << residual_norm << " (should be smaller than 1e-6): " << check << std::endl; cudaFree(csr_rowoffsets); cudaFree(csr_colindices); cudaFree(csr_values); free(solution); free(rhs); free(h_csr_rowoffsets); free(h_csr_colindices); free(h_csr_values); } int main() { solve_system(10); // solves a system with 100*100 unknowns return EXIT_SUCCESS; }
dbd66a672339c0d54d1b7a0a8975fe77c66fceef.hip
// !!! This is a file automatically generated by hipify!!! // This example demonstrates the use of shared per-block arrays // implement an optimized dense matrix multiplication algorithm. // Like the shared_variables.cu example, a per-block __shared__ // array acts as a "bandwidth multiplier" by eliminating redundant // loads issued by neighboring threads. #include <stdlib.h> #include <stdio.h> #include <vector> #include <algorithm> #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define TILE_WIDTH 16 // a simple version of matrix_multiply which issues redundant loads from off-chip global memory __global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width) { // calculate the row & column index of the element int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int result = 0; // do dot product between row of a and column of b for(int k = 0; k < width; ++k) { result += a[row*width+k] * b[k*width+col]; } // write out this thread's result ab[row*width+col] = result; } void MatrixMulOnHost(int* M, int* N, int* P, int Width) { for (int i = 0; i < Width; ++i) { for (int j = 0; j < Width; ++j) { double sum = 0; for (int k = 0; k < Width; ++k) { double a = M[i * Width + k]; double b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements //const size_t n = 1<<10; const size_t n = 1024; std::cout << "Total element is " << n << "\n"; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n); for(int i = 0; i < n*n; ++i) { h_a[i] = static_cast<int>(rand()) / RAND_MAX; h_b[i] = static_cast<int>(rand()) / RAND_MAX; } // allocate storage for the 
device int *d_a = 0, *d_b = 0, *d_c = 0; hipMalloc((void**)&d_a, sizeof(int) * n * n); hipMalloc((void**)&d_b, sizeof(int) * n * n); hipMalloc((void**)&d_c, sizeof(int) * n * n); // copy input to the device hipMemcpy(d_a, &h_a[0], sizeof(int) * n * n, hipMemcpyHostToDevice); hipMemcpy(d_b, &h_b[0], sizeof(int) * n * n, hipMemcpyHostToDevice); // to get accurate timings, launch a single "warm-up" kernel hipLaunchKernelGGL(( matrix_multiply_simple), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_b, d_c, n); hipMemcpy(&h_c[0], d_c, sizeof(int) * n * n, hipMemcpyDeviceToHost); //------------------ int* h_r; h_r = (int*)malloc(sizeof(int) * n * n); MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n); for (int i=0; i<(n*n); i++) { if (h_r[i] != h_c[i]) { std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n"; exit(1); } } std::cout << "Result is correct."; // deallocate device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
dbd66a672339c0d54d1b7a0a8975fe77c66fceef.cu
// This example demonstrates the use of shared per-block arrays // implement an optimized dense matrix multiplication algorithm. // Like the shared_variables.cu example, a per-block __shared__ // array acts as a "bandwidth multiplier" by eliminating redundant // loads issued by neighboring threads. #include <stdlib.h> #include <stdio.h> #include <vector> #include <algorithm> #include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #define TILE_WIDTH 16 // a simple version of matrix_multiply which issues redundant loads from off-chip global memory __global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width) { // calculate the row & column index of the element int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int result = 0; // do dot product between row of a and column of b for(int k = 0; k < width; ++k) { result += a[row*width+k] * b[k*width+col]; } // write out this thread's result ab[row*width+col] = result; } void MatrixMulOnHost(int* M, int* N, int* P, int Width) { for (int i = 0; i < Width; ++i) { for (int j = 0; j < Width; ++j) { double sum = 0; for (int k = 0; k < Width; ++k) { double a = M[i * Width + k]; double b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements //const size_t n = 1<<10; const size_t n = 1024; std::cout << "Total element is " << n << "\n"; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n); for(int i = 0; i < n*n; ++i) { h_a[i] = static_cast<int>(rand()) / RAND_MAX; h_b[i] = static_cast<int>(rand()) / RAND_MAX; } // allocate storage for the device int *d_a = 0, *d_b = 0, *d_c = 0; 
cudaMalloc((void**)&d_a, sizeof(int) * n * n); cudaMalloc((void**)&d_b, sizeof(int) * n * n); cudaMalloc((void**)&d_c, sizeof(int) * n * n); // copy input to the device cudaMemcpy(d_a, &h_a[0], sizeof(int) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &h_b[0], sizeof(int) * n * n, cudaMemcpyHostToDevice); // to get accurate timings, launch a single "warm-up" kernel matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n); cudaMemcpy(&h_c[0], d_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost); //------------------ int* h_r; h_r = (int*)malloc(sizeof(int) * n * n); MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n); for (int i=0; i<(n*n); i++) { if (h_r[i] != h_c[i]) { std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n"; exit(1); } } std::cout << "Result is correct."; // deallocate device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
7f6397c4fca6c79992c4876d5947dfebb91e36fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <time.h> #define KB 1024 #define MB 1048576 #define ASYNC_V1 1 #define ASYNC_V2 2 #define ASYNC_V3 3 static char *sAsyncMethod[] = { "0 (None, Sequential)", "1 (Async V1)", "2 (Async V2)", "3 (Async V3)", NULL }; /************************************************* timeBurningKernel *************************************************/ #define BURNING 1050 __global__ void timeBurningKernel(float *d_a, float *d_r, float factor, int N) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if( gid < N ) { for( int i = 0 ; i < BURNING ; ++i ) d_r[gid] = d_a[gid] + factor * factor; } } /************************************************* main *************************************************/ int main(int argc, char* argv[]) { if( argc < 2 ) { puts("usage: ./a.out [Async Mode]"); return 0; } const int niterations = 2*3*4; // number of iterations for the loop inside the kernel int nstreams = 3; int async_mode = ASYNC_V1; //default Async_V1 float factor = 1.1; int N = 4*MB; if( argc > 1 ) async_mode = atoi(argv[1]); if( async_mode == 0 ) nstreams = 1; printf("N: %d\n", N ); printf("# BURNING: %d\n", BURNING ); printf("# iterations: %d\n", niterations ); printf("# streams: %d\n", nstreams ); printf("ASync method: %s\n", sAsyncMethod[async_mode]); //Total size size_t sz = 128 * MB; if( (sz/sizeof(float)) < BURNING ) { printf("error: 'sz' must be larger than BURNING\n"); exit(-1); } //Struct for time measure struct timeval start, end, timer; // TODO: allocate and initialize an array of stream handles hipStream_t *streams = ... 
// Memory allocation for cpu (host) // Pinned memory (page-locked) float *h_a[niterations]; float *h_r[niterations]; for( int i = 0 ; i < niterations ; ++i ) { hipHostMalloc((void**)&h_a[i], sz); hipHostMalloc((void**)&h_r[i], sz); } srand(time(NULL)); for( int j = 0 ; j < niterations ; ++j ) { for(int i = 0 ; i < N*N; i++ ) { h_a[j][i] = (float)(rand()%100); h_r[j][i] = 0.; } } //Memory allocation for gpu(device) float *d_a[nstreams], *d_r[nstreams]; for( int j = 0 ; j < nstreams ; ++j ) { hipMalloc((void **) &d_a[j], sz ); hipMalloc((void **) &d_r[j], sz ); } /************************************************* Launching timeBurningKernel *************************************************/ size_t dim_threads = 256; size_t dim_grid = ((N%dim_threads)? N/dim_threads+1 : N/dim_threads); hipDeviceSynchronize(); gettimeofday(&start, NULL); if(nstreams == 1 ) { for( int i =0 ; i < niterations ; i ++ ) { // TODO // No stream (sequential) } } else { if(async_mode == ASYNC_V1 ) { // TODO } else if(async_mode == ASYNC_V2) { // TODO } else // Async V3 { // TODO } } hipDeviceSynchronize(); gettimeofday(&end, NULL); timersub(&end,&start,&timer); printf("%d, elapsed time: %lf\n", niterations, (timer.tv_usec / 1000.0 + timer.tv_sec *1000.0) ); for(int i=0; i<niterations; i++) { hipHostFree(h_r[i]); hipHostFree(h_a[i]); } for(int i=0; i<nstreams; i++) { hipFree(d_r[i]); hipFree(d_a[i]); } return 0; }
7f6397c4fca6c79992c4876d5947dfebb91e36fa.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <time.h> #define KB 1024 #define MB 1048576 #define ASYNC_V1 1 #define ASYNC_V2 2 #define ASYNC_V3 3 static char *sAsyncMethod[] = { "0 (None, Sequential)", "1 (Async V1)", "2 (Async V2)", "3 (Async V3)", NULL }; /************************************************* timeBurningKernel *************************************************/ #define BURNING 1050 __global__ void timeBurningKernel(float *d_a, float *d_r, float factor, int N) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if( gid < N ) { for( int i = 0 ; i < BURNING ; ++i ) d_r[gid] = d_a[gid] + factor * factor; } } /************************************************* main *************************************************/ int main(int argc, char* argv[]) { if( argc < 2 ) { puts("usage: ./a.out [Async Mode]"); return 0; } const int niterations = 2*3*4; // number of iterations for the loop inside the kernel int nstreams = 3; int async_mode = ASYNC_V1; //default Async_V1 float factor = 1.1; int N = 4*MB; if( argc > 1 ) async_mode = atoi(argv[1]); if( async_mode == 0 ) nstreams = 1; printf("N: %d\n", N ); printf("# BURNING: %d\n", BURNING ); printf("# iterations: %d\n", niterations ); printf("# streams: %d\n", nstreams ); printf("ASync method: %s\n", sAsyncMethod[async_mode]); //Total size size_t sz = 128 * MB; if( (sz/sizeof(float)) < BURNING ) { printf("error: 'sz' must be larger than BURNING\n"); exit(-1); } //Struct for time measure struct timeval start, end, timer; // TODO: allocate and initialize an array of stream handles cudaStream_t *streams = ... 
// Memory allocation for cpu (host) // Pinned memory (page-locked) float *h_a[niterations]; float *h_r[niterations]; for( int i = 0 ; i < niterations ; ++i ) { cudaMallocHost((void**)&h_a[i], sz); cudaMallocHost((void**)&h_r[i], sz); } srand(time(NULL)); for( int j = 0 ; j < niterations ; ++j ) { for(int i = 0 ; i < N*N; i++ ) { h_a[j][i] = (float)(rand()%100); h_r[j][i] = 0.; } } //Memory allocation for gpu(device) float *d_a[nstreams], *d_r[nstreams]; for( int j = 0 ; j < nstreams ; ++j ) { cudaMalloc((void **) &d_a[j], sz ); cudaMalloc((void **) &d_r[j], sz ); } /************************************************* Launching timeBurningKernel *************************************************/ size_t dim_threads = 256; size_t dim_grid = ((N%dim_threads)? N/dim_threads+1 : N/dim_threads); cudaDeviceSynchronize(); gettimeofday(&start, NULL); if(nstreams == 1 ) { for( int i =0 ; i < niterations ; i ++ ) { // TODO // No stream (sequential) } } else { if(async_mode == ASYNC_V1 ) { // TODO } else if(async_mode == ASYNC_V2) { // TODO } else // Async V3 { // TODO } } cudaDeviceSynchronize(); gettimeofday(&end, NULL); timersub(&end,&start,&timer); printf("%d, elapsed time: %lf\n", niterations, (timer.tv_usec / 1000.0 + timer.tv_sec *1000.0) ); for(int i=0; i<niterations; i++) { cudaFreeHost(h_r[i]); cudaFreeHost(h_a[i]); } for(int i=0; i<nstreams; i++) { cudaFree(d_r[i]); cudaFree(d_a[i]); } return 0; }
7dafdfaea55882b5a9e354670069b75567762fb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=195 --blockDim=128 #include "common.h" __device__ float max(float, float); template <typename Real> __device__ static __attribute__((always_inline)) Real reduce_sum(Real in); template <typename Real> __global__ void computeValue(Real *const values, const Real *const paths, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps); template __global__ void computeValue<float>(float *const values, const float *const paths, const AsianOption<float> *const option, const unsigned int numSims, const unsigned int numTimesteps); template <typename Real> __device__ static __attribute__((always_inline)) Real reduce_sum(Real in) { #if 0 // imperial edit SharedMemory<Real> sdata; #else Real sdata[1]; #endif // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } template <typename Real> __global__ void computeValue(Real *const values, const Real *const paths, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; Real sumPayoffs = static_cast<Real>(0); for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the input pointer const Real *path = paths + i; // Compute the arithmetic average Real avg = static_cast<Real>(0); for (unsigned int t = 0 ; t < numTimesteps ; t++, path += numSims) { avg += *path; } avg = avg * option->spot / numTimesteps; // Compute the payoff Real payoff = avg - option->strike; if (option->type == AsianOption<Real>::Put) { payoff = - payoff; } payoff = 
max(static_cast<Real>(0), payoff); // Accumulate payoff locally sumPayoffs += payoff; } // Reduce within the block sumPayoffs = reduce_sum<Real>(sumPayoffs); // Store the result if (threadIdx.x == 0) { values[bid] = sumPayoffs; } }
7dafdfaea55882b5a9e354670069b75567762fb5.cu
//pass //--gridDim=195 --blockDim=128 #include "common.h" __device__ float max(float, float); template <typename Real> __device__ static __attribute__((always_inline)) Real reduce_sum(Real in); template <typename Real> __global__ void computeValue(Real *const values, const Real *const paths, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps); template __global__ void computeValue<float>(float *const values, const float *const paths, const AsianOption<float> *const option, const unsigned int numSims, const unsigned int numTimesteps); template <typename Real> __device__ static __attribute__((always_inline)) Real reduce_sum(Real in) { #if 0 // imperial edit SharedMemory<Real> sdata; #else Real sdata[1]; #endif // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } template <typename Real> __global__ void computeValue(Real *const values, const Real *const paths, const AsianOption<Real> *const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; Real sumPayoffs = static_cast<Real>(0); for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the input pointer const Real *path = paths + i; // Compute the arithmetic average Real avg = static_cast<Real>(0); for (unsigned int t = 0 ; t < numTimesteps ; t++, path += numSims) { avg += *path; } avg = avg * option->spot / numTimesteps; // Compute the payoff Real payoff = avg - option->strike; if (option->type == AsianOption<Real>::Put) { payoff = - payoff; } payoff = max(static_cast<Real>(0), payoff); // Accumulate payoff locally sumPayoffs += payoff; } 
// Reduce within the block sumPayoffs = reduce_sum<Real>(sumPayoffs); // Store the result if (threadIdx.x == 0) { values[bid] = sumPayoffs; } }
9a122c999f88ae5a1be262c682ae2825f9a195f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ /***************** Adapted by Charles Shang *********************/ #include <cstdio> #include <algorithm> #include <cstring> #include <iostream> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __device__ T bilinear_interp_cuda( const T *data, const T x, const T y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); T dist_x = static_cast<T>(x - x1); T dist_y = static_cast<T>(y - y1); T value11 = data[y1 * width + x1]; T value12 = data[y2 * width + x1]; T value21 = data[y1 * width + x2]; T value22 = data[y2 * width + x2]; T value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; return value; } template <typename T> __global__ void DeformablePSROIPoolForwardKernelCuda( const int count, const T *bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T *bottom_rois, const T *bottom_trans, const int no_trans, const T trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, T *top_data, T *top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; 
int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part); T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part); int part_h = floor(static_cast<T>(ph) / pooled_height * part_size); int part_w = floor(static_cast<T>(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; T trans_y = no_trans ? 
static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; wstart += trans_x * roi_width; T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; T sum = 0; int count = 0; int gw = floor(static_cast<T>(pw) * group_size / pooled_width); int gh = floor(static_cast<T>(ph) * group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { T w = wstart + iw * sub_bin_size_w; T h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; T val = bilinear_interp_cuda(offset_bottom_data + c * height * width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? 
static_cast<T>(0) : sum / count; top_count[index] = count; } } template <typename T> __global__ void DeformablePSROIPoolBackwardAccKernelCuda( const int count, const T *top_diff, const T *top_count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, T *bottom_data_diff, T *bottom_trans_diff, const T *bottom_data, const T *bottom_rois, const T *bottom_trans, const int no_trans, const T trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) 
* spatial_scale - 0.5; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part); T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part); int part_h = floor(static_cast<T>(ph) / pooled_height * part_size); int part_w = floor(static_cast<T>(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; wstart += trans_x * roi_width; T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } T diff_val = top_diff[index] / top_count[index]; const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor(static_cast<T>(pw) * group_size / pooled_width); int gh = floor(static_cast<T>(ph) * group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { T w = wstart + iw * sub_bin_size_w; T h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; // backward on feature int x0 = 
floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); T dist_x = w - x0, dist_y = h - y0; T q00 = (1 - dist_x) * (1 - dist_y); T q01 = (1 - dist_x) * dist_y; T q10 = dist_x * (1 - dist_y); T q11 = dist_x * dist_y; int bottom_index_base = c * height * width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); if (no_trans) { continue; } T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; diff_x *= roi_width; T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } std::tuple<at::Tensor, at::Tensor> dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input, const at::Tensor &bbox, const at::Tensor &trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(bbox.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor"); const int batch = input.size(0); const int channels = 
input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_trans = no_trans ? 2 : trans.size(1); const int num_bbox = bbox.size(0); AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); auto pooled_height = pooled_size; auto pooled_width = pooled_size; auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); long out_size = num_bbox * output_dim * pooled_height * pooled_width; auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (out.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(out, top_count); } dim3 grid(::min(THCCeilDiv(out_size, 512L), 4096L)); dim3 block(512); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "dcn_v2_psroi_pooling_cuda_forward", [&] { hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernelCuda<scalar_t>), dim3(grid), dim3(block), 0, stream, out_size, input.contiguous().data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, bbox.contiguous().data_ptr<scalar_t>(), trans.contiguous().data_ptr<scalar_t>(), no_trans, trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, out.data_ptr<scalar_t>(), top_count.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return std::make_tuple(out, top_count); } std::tuple<at::Tensor, at::Tensor> dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad, const at::Tensor &input, const at::Tensor &bbox, const at::Tensor &trans, const at::Tensor &top_count, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { 
AT_ASSERTM(out_grad.is_cuda(), "out_grad must be a CUDA tensor"); AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(bbox.is_cuda(), "bbox must be a CUDA tensor"); AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor"); AT_ASSERTM(top_count.is_cuda(), "top_count must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_trans = no_trans ? 2 : trans.size(1); const int num_bbox = bbox.size(0); AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); auto pooled_height = pooled_size; auto pooled_width = pooled_size; long out_size = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options()); auto trans_grad = at::zeros_like(trans); if (input_grad.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(input_grad, trans_grad); } dim3 grid(::min(THCCeilDiv(out_size, 512L), 4096L)); dim3 block(512); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES(out_grad.scalar_type(), "dcn_v2_psroi_pooling_cuda_backward", [&] { hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernelCuda<scalar_t>), dim3(grid), dim3(block), 0, stream, out_size, out_grad.contiguous().data_ptr<scalar_t>(), top_count.contiguous().data_ptr<scalar_t>(), num_bbox, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, input_grad.contiguous().data_ptr<scalar_t>(), trans_grad.contiguous().data_ptr<scalar_t>(), input.contiguous().data_ptr<scalar_t>(), bbox.contiguous().data_ptr<scalar_t>(), trans.contiguous().data_ptr<scalar_t>(), no_trans, trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); }); 
THCudaCheck(hipGetLastError()); return std::make_tuple(input_grad, trans_grad); }
9a122c999f88ae5a1be262c682ae2825f9a195f6.cu
/*! * Copyright (c) 2017 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ /***************** Adapted by Charles Shang *********************/ #include <cstdio> #include <algorithm> #include <cstring> #include <iostream> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __device__ T bilinear_interp_cuda( const T *data, const T x, const T y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); T dist_x = static_cast<T>(x - x1); T dist_y = static_cast<T>(y - y1); T value11 = data[y1 * width + x1]; T value12 = data[y2 * width + x1]; T value21 = data[y1 * width + x2]; T value22 = data[y2 * width + x2]; T value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; return value; } template <typename T> __global__ void DeformablePSROIPoolForwardKernelCuda( const int count, const T *bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T *bottom_rois, const T *bottom_trans, const int no_trans, const T trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, T *top_data, T *top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / 
pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part); T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part); int part_h = floor(static_cast<T>(ph) / pooled_height * part_size); int part_w = floor(static_cast<T>(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; T trans_y = no_trans ? 
static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; wstart += trans_x * roi_width; T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; T sum = 0; int count = 0; int gw = floor(static_cast<T>(pw) * group_size / pooled_width); int gh = floor(static_cast<T>(ph) * group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { T w = wstart + iw * sub_bin_size_w; T h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; T val = bilinear_interp_cuda(offset_bottom_data + c * height * width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? 
static_cast<T>(0) : sum / count; top_count[index] = count; } } template <typename T> __global__ void DeformablePSROIPoolBackwardAccKernelCuda( const int count, const T *top_diff, const T *top_count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, T *bottom_data_diff, T *bottom_trans_diff, const T *bottom_data, const T *bottom_rois, const T *bottom_trans, const int no_trans, const T trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; T roi_start_h = static_cast<T>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; T roi_end_w = static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; T roi_end_h = static_cast<T>(round(offset_bottom_rois[4]) + 1.) 
* spatial_scale - 0.5; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part); T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part); int part_h = floor(static_cast<T>(ph) / pooled_height * part_size); int part_w = floor(static_cast<T>(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; T trans_x = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; T trans_y = no_trans ? static_cast<T>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; wstart += trans_x * roi_width; T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } T diff_val = top_diff[index] / top_count[index]; const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor(static_cast<T>(pw) * group_size / pooled_width); int gh = floor(static_cast<T>(ph) * group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { T w = wstart + iw * sub_bin_size_w; T h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; // backward on feature int x0 = 
floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); T dist_x = w - x0, dist_y = h - y0; T q00 = (1 - dist_x) * (1 - dist_y); T q01 = (1 - dist_x) * dist_y; T q10 = dist_x * (1 - dist_y); T q11 = dist_x * dist_y; int bottom_index_base = c * height * width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); if (no_trans) { continue; } T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; diff_x *= roi_width; T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } std::tuple<at::Tensor, at::Tensor> dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input, const at::Tensor &bbox, const at::Tensor &trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(bbox.is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor"); const int batch = input.size(0); const int channels = 
input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_trans = no_trans ? 2 : trans.size(1); const int num_bbox = bbox.size(0); AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); auto pooled_height = pooled_size; auto pooled_width = pooled_size; auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); long out_size = num_bbox * output_dim * pooled_height * pooled_width; auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (out.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(out, top_count); } dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L)); dim3 block(512); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "dcn_v2_psroi_pooling_cuda_forward", [&] { DeformablePSROIPoolForwardKernelCuda<scalar_t><<<grid, block, 0, stream>>>( out_size, input.contiguous().data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, bbox.contiguous().data_ptr<scalar_t>(), trans.contiguous().data_ptr<scalar_t>(), no_trans, trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, out.data_ptr<scalar_t>(), top_count.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(out, top_count); } std::tuple<at::Tensor, at::Tensor> dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad, const at::Tensor &input, const at::Tensor &bbox, const at::Tensor &trans, const at::Tensor &top_count, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { AT_ASSERTM(out_grad.is_cuda(), "out_grad 
must be a CUDA tensor"); AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(bbox.is_cuda(), "bbox must be a CUDA tensor"); AT_ASSERTM(trans.is_cuda(), "trans must be a CUDA tensor"); AT_ASSERTM(top_count.is_cuda(), "top_count must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_trans = no_trans ? 2 : trans.size(1); const int num_bbox = bbox.size(0); AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); auto pooled_height = pooled_size; auto pooled_width = pooled_size; long out_size = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options()); auto trans_grad = at::zeros_like(trans); if (input_grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(input_grad, trans_grad); } dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L)); dim3 block(512); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES(out_grad.scalar_type(), "dcn_v2_psroi_pooling_cuda_backward", [&] { DeformablePSROIPoolBackwardAccKernelCuda<scalar_t><<<grid, block, 0, stream>>>( out_size, out_grad.contiguous().data_ptr<scalar_t>(), top_count.contiguous().data_ptr<scalar_t>(), num_bbox, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, input_grad.contiguous().data_ptr<scalar_t>(), trans_grad.contiguous().data_ptr<scalar_t>(), input.contiguous().data_ptr<scalar_t>(), bbox.contiguous().data_ptr<scalar_t>(), trans.contiguous().data_ptr<scalar_t>(), no_trans, trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(input_grad, trans_grad); }
ecef97c0de88f530028c21872bc0725b69822807.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Device code __global__ void array3D(hipPitchedPtr devPitchedPtr, int width, int height, int depth) { char* devPtr = devPitchedPtr.ptr; size_t pitch = devPitchedPtr.pitch; size_t slicePitch = pitch * height; for (int z = 0; z < depth; ++z) { char* slice = devPtr + z * slicePitch; for (int y = 0; y < height; ++y) { float* row = (float*)(slice + y * pitch); for (int x = 0; x < width; ++x) { float element = row[x]; } } } } // Device code __global__ void array2D(float* devPtr, size_t pitch, int width, int height) { for (int r = 0; r < height; ++r) { float* row = (float*)((char*)devPtr + r * pitch); for (int c = 0; c < width; ++c) { float element = row[c]; } } } int main(){ // Host code int width = 64, height = 64, depth = 64; hipExtent extent = make_hipExtent(width * sizeof(float), height, depth); hipPitchedPtr devPitchedPtr; hipMalloc3D(&devPitchedPtr, extent); hipLaunchKernelGGL(( array3D), dim3(100), dim3(512), 0, 0, devPitchedPtr, width, height, depth); size_t pitch; hipMallocPitch(&devPtr, &pitch, width * sizeof(float), height); hipLaunchKernelGGL(( array2D), dim3(100), dim3(512), 0, 0, devPtr, pitch, width, height); }
ecef97c0de88f530028c21872bc0725b69822807.cu
// Device code __global__ void array3D(cudaPitchedPtr devPitchedPtr, int width, int height, int depth) { char* devPtr = devPitchedPtr.ptr; size_t pitch = devPitchedPtr.pitch; size_t slicePitch = pitch * height; for (int z = 0; z < depth; ++z) { char* slice = devPtr + z * slicePitch; for (int y = 0; y < height; ++y) { float* row = (float*)(slice + y * pitch); for (int x = 0; x < width; ++x) { float element = row[x]; } } } } // Device code __global__ void array2D(float* devPtr, size_t pitch, int width, int height) { for (int r = 0; r < height; ++r) { float* row = (float*)((char*)devPtr + r * pitch); for (int c = 0; c < width; ++c) { float element = row[c]; } } } int main(){ // Host code int width = 64, height = 64, depth = 64; cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth); cudaPitchedPtr devPitchedPtr; cudaMalloc3D(&devPitchedPtr, extent); array3D<<<100, 512>>>(devPitchedPtr, width, height, depth); size_t pitch; cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height); array2D<<<100, 512>>>(devPtr, pitch, width, height); }
71d96de7bc179e9316991a73f954e030afa45c81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal d */ #include "common_magma.h" #include "commonblas_d.h" static __device__ void daxpy(double a,double *b, double *c) { c[0] += a * b[0]; c[1] += a * b[1]; c[2] += a * b[2]; c[3] += a * b[3]; c[4] += a * b[4]; c[5] += a * b[5]; c[6] += a * b[6]; c[7] += a * b[7]; c[8] += a * b[8]; c[9] += a * b[9]; c[10] += a * b[10]; c[11] += a * b[11]; c[12] += a * b[12]; c[13] += a * b[13]; c[14] += a * b[14]; c[15] += a * b[15]; } __global__ void dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta) { /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 Purpose: ======== This routine computes C = alpha* A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices devisible by the corresponding blocking sizes. 
=============================================================== */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y *16; const int idt = ty * 16 + tx; B+=tx+__mul24(iby+ty,ldb); A += ibx + idt; C += ibx +idt +__mul24( iby,ldc); const double *Bend = B + k; double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; m = 2*lda ; n = 3*lda ; do { //double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; double Ab[4] = {A[0], A[lda], A[m], A[n]}; __shared__ double Bb[16][17]; Bb[tx][ty+0] = B[0]; Bb[tx][ty+4] = B[4*ldb]; Bb[tx][ty+8] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy(Ab[0], &Bb[0][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[1][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[2][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[3][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[4][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[5][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[6][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[7][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[8][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[9][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[12][0], Cb); daxpy(Ab[1], &Bb[13][0], Cb); daxpy(Ab[2], &Bb[14][0], Cb); daxpy(Ab[3], &Bb[15][0], Cb); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for (int i = 0; i < 16; i++, C += ldc) { C[0] =alpha*Cb[i] + beta * C[0]; } } extern "C" void magmablas_dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta) { dim3 threads( 16, 4 ); dim3 grid(m/64,n/16); hipLaunchKernelGGL(( dgemm_kernel_N_N_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta); }
71d96de7bc179e9316991a73f954e030afa45c81.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal d */ #include "common_magma.h" #include "commonblas_d.h" static __device__ void daxpy(double a,double *b, double *c) { c[0] += a * b[0]; c[1] += a * b[1]; c[2] += a * b[2]; c[3] += a * b[3]; c[4] += a * b[4]; c[5] += a * b[5]; c[6] += a * b[6]; c[7] += a * b[7]; c[8] += a * b[8]; c[9] += a * b[9]; c[10] += a * b[10]; c[11] += a * b[11]; c[12] += a * b[12]; c[13] += a * b[13]; c[14] += a * b[14]; c[15] += a * b[15]; } __global__ void dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta) { /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 Purpose: ======== This routine computes C = alpha* A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices devisible by the corresponding blocking sizes. 
=============================================================== */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y *16; const int idt = ty * 16 + tx; B+=tx+__mul24(iby+ty,ldb); A += ibx + idt; C += ibx +idt +__mul24( iby,ldc); const double *Bend = B + k; double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; m = 2*lda ; n = 3*lda ; do { //double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; double Ab[4] = {A[0], A[lda], A[m], A[n]}; __shared__ double Bb[16][17]; Bb[tx][ty+0] = B[0]; Bb[tx][ty+4] = B[4*ldb]; Bb[tx][ty+8] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy(Ab[0], &Bb[0][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[1][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[2][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[3][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[4][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[5][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[6][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[7][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[8][0], Cb); Ab[0] = A[0]; daxpy(Ab[1], &Bb[9][0], Cb); Ab[1] = A[lda]; daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[m]; daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[n]; A += 4 * lda; daxpy(Ab[0], &Bb[12][0], Cb); daxpy(Ab[1], &Bb[13][0], Cb); daxpy(Ab[2], &Bb[14][0], Cb); daxpy(Ab[3], &Bb[15][0], Cb); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for (int i = 0; i < 16; i++, C += ldc) { C[0] =alpha*Cb[i] + beta * C[0]; } } extern "C" void magmablas_dgemm_kernel_N_N_64_16_16_16_4_special(double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta) { dim3 threads( 16, 4 ); dim3 grid(m/64,n/16); dgemm_kernel_N_N_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta); }
4035f8746532c2bed7923fb12e057d277b5d4edd.hip
// !!! This is a file automatically generated by hipify!!! // ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 IN a%hmpp_codelet__runMvt: (1, 0) // #pragma hmppcg cpiparam __arg1 INOUT x1%hmpp_codelet__runMvt: (1, 1) // #pragma hmppcg cpiparam __arg2 INOUT x2%hmpp_codelet__runMvt: (1, 2) // #pragma hmppcg cpiparam __arg3 IN y1%hmpp_codelet__runMvt: (1, 3) // #pragma hmppcg cpiparam __arg4 IN y2%hmpp_codelet__runMvt: (1, 4) // // #pragma hmppcg cpicall hmpp_codelet__runMvt(__arg0, __arg1, __arg2, __arg3, __arg4): 1 // // // /* begin of extracted source code for directive set "mvt" */ // // // # 25 "mvt.c" // typedef float DATA_TYPE; // // // # 30 "mvt.c" // void hmpp_codelet__runMvt(DATA_TYPE a[4096][4096], DATA_TYPE x1[4096], DATA_TYPE x2[4096], DATA_TYPE y1[4096], DATA_TYPE y2[4096]) // { // int i, j; // // #pragma hmppcg grid blocksize 32 X 8 // # 9 "<preprocessor>" // # 36 "mvt.c" // #pragma hmppcg tile i:2 // # 12 "<preprocessor>" // # 37 "mvt.c" // for (i = 0 ; i < 4096 ; i++) // { // #pragma hmppcg tile j:2 // # 17 "<preprocessor>" // # 40 "mvt.c" // for (j = 0 ; j < 4096 ; j++) // { // x1[i] = x1[i] + a[i][j] * y1[j]; // } // } // // #pragma hmppcg grid blocksize 32 X 8 // # 26 "<preprocessor>" // # 48 "mvt.c" // for (i = 0 ; i < 4096 ; i++) // { // #pragma hmppcg tile j:2 // # 31 "<preprocessor>" // # 51 "mvt.c" // for (j = 0 ; j < 4096 ; j++) // { // x2[i] = x2[i] + a[j][i] * y2[j]; // } // } // } // // // /* end of extracted source code for directive set "mvt" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # endif 
#else # if defined(__GNUC__) || defined(__RESTRICT) # define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <hip/hip_runtime.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,hipError_t status) { if(status != hipSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, hipGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. 
Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(hipStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(hipMalloc(ptr,size)) != 0 ) return -1; #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(hipStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(hipFree(ptr)) != 0) return -1; if( CHECK_STATUS(hipEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static hipMemcpyKind cudaKind[] = {hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(hipMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(hipMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(hipStream_t wstream) { int status; status = CHECK_STATUS(hipEventRecord(event, stream_)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(stream_, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(hipStreamSynchronize(stream_)); } private: hipStream_t 
stream_; hipEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return CHECK_STATUS(hipMemcpyToSymbol(hmpp_constmem,src,size,offset,hipMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(hipMemcpyFromSymbol(dst,hmpp_constmem,size,offset,hipMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(hipStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(hipEventRecord(event, 0)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); 
#else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { /* stream 0 at the moment */ #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; hipEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } Element &operator=(const T & value) { if(lazy_) { *((T *)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_, 
ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ = ElemSize; for(int d=0;d<dim;d++) { 
sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = * ((T *)host_addr); lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(hipStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(hipEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return at(sizes); } 
ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1)) lastdense = i; 
else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: virtual ~UserData(){} UserData(){} }; #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... 
) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } 
#endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float y; }__hmppcg_complex_float; #endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__runMvt_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT x1, float * HMPPCG_RESTRICT y1) { int32_t i_3; int32_t outer_i_2; i_3 = (blockDimX__ * blockIdx.x + threadIdx.x); outer_i_2 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((i_3 <= 1) & (outer_i_2 <= 2047))); if(__hmppcg_guard) { goto __hmppcg_label1; }; { int32_t __hmppcg_end, outer_j_2; for (outer_j_2 = 0, __hmppcg_end = 2047; outer_j_2 <= __hmppcg_end; outer_j_2 += 1) { { int32_t __hmppcg_end, j_3; for (j_3 = 0, __hmppcg_end = 1; j_3 <= __hmppcg_end; j_3 += 1) { x1[i_3 + ((int32_t) (outer_i_2 * 2))] = (x1[i_3 + ((int32_t) (outer_i_2 * 2))]) + ((a[((i_3 + ((int32_t) (outer_i_2 * 2))) * 4096) + (j_3 + ((int32_t) (outer_j_2 * 2)))]) * (y1[j_3 + ((int32_t) (outer_j_2 * 2))])); } } } } __hmppcg_label1:; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> 
__global__ void hmpp_codelet__runMvt_loop1_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT x2, float * HMPPCG_RESTRICT y2) { int32_t i_2; i_2 = (blockDimX__ * blockDimY__ * blockIdx.x + threadIdx.y * blockDimX__ + threadIdx.x); bool __hmppcg_guard = (!(i_2 <= 4095)); if(__hmppcg_guard) { goto __hmppcg_label3; }; { int32_t __hmppcg_end, outer_j_4; for (outer_j_4 = 0, __hmppcg_end = 2047; outer_j_4 <= __hmppcg_end; outer_j_4 += 1) { { int32_t __hmppcg_end, j_4; for (j_4 = 0, __hmppcg_end = 1; j_4 <= __hmppcg_end; j_4 += 1) { x2[i_2] = (x2[i_2]) + ((a[((j_4 + ((int32_t) (outer_j_4 * 2))) * 4096) + i_2]) * (y2[j_4 + ((int32_t) (outer_j_4 * 2))])); } } } } __hmppcg_label3:; } void hmpp_codelet__runMvt( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & x1, Data<float,DefaultPolicy> & x2, Data<float,DefaultPolicy> & y1, Data<float,DefaultPolicy> & y2) { if(1LL) { unsigned int gridDimX__ = 1LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 512LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 4LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 a.makeStreamWait(kernel_stream); x1.makeStreamWait(kernel_stream); y1.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__runMvt_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, a.getDeviceAddr(), x1.getDeviceAddr(), y1.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); x1.waitOnEvent(kernel_event); 
y1.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 1LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 a.makeStreamWait(kernel_stream); x2.makeStreamWait(kernel_stream); y2.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__runMvt_loop1_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, a.getDeviceAddr(), x2.getDeviceAddr(), y2.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); x2.waitOnEvent(kernel_event); y2.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; Data<float,DefaultPolicy> * __arg3; Data<float,DefaultPolicy> * __arg4; 
hipDeviceProp_t devProp; hipStream_t kernel_stream; hipEvent_t kernel_event; std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // --------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(hipStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if TORCH_HIP_VERSION >= 3020 if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; __h->__arg3 = NULL; __h->__arg4 = NULL; int device; hipGetDevice(&device); hipGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; delete __h->__arg3; delete __h->__arg4; hipStreamDestroy(__h->kernel_stream); hipEventDestroy(__h->kernel_event); __h->kernel_stream = 0; for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { __h->__arg0 = new 
Data<float,DefaultPolicy>("__arg0", DEFAULT); return __h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { __h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT); return __h->__arg3->allocate2(dim, size); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { __h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT); return __h->__arg4->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return 
__h->__arg2->download(data,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->download(data,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->upload(data,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->upload(data,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,3): // 
y1@hmpp_codelet__runMvt { return __h->__arg3->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt 
{ return __h->__arg4->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__runMvt(__arg0,__arg1,__arg2,__arg3,__arg4) hmpp_codelet__runMvt(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg2), (*__h->__arg3), (*__h->__arg4)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(hipStreamSynchronize(__h->kernel_stream)); } // 
--------------------------------------------------------------------------- // hmpp_version() // --------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
4035f8746532c2bed7923fb12e057d277b5d4edd.cu
// ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 IN a%hmpp_codelet__runMvt: (1, 0) // #pragma hmppcg cpiparam __arg1 INOUT x1%hmpp_codelet__runMvt: (1, 1) // #pragma hmppcg cpiparam __arg2 INOUT x2%hmpp_codelet__runMvt: (1, 2) // #pragma hmppcg cpiparam __arg3 IN y1%hmpp_codelet__runMvt: (1, 3) // #pragma hmppcg cpiparam __arg4 IN y2%hmpp_codelet__runMvt: (1, 4) // // #pragma hmppcg cpicall hmpp_codelet__runMvt(__arg0, __arg1, __arg2, __arg3, __arg4): 1 // // // /* begin of extracted source code for directive set "mvt" */ // // // # 25 "mvt.c" // typedef float DATA_TYPE; // // // # 30 "mvt.c" // void hmpp_codelet__runMvt(DATA_TYPE a[4096][4096], DATA_TYPE x1[4096], DATA_TYPE x2[4096], DATA_TYPE y1[4096], DATA_TYPE y2[4096]) // { // int i, j; // // #pragma hmppcg grid blocksize 32 X 8 // # 9 "<preprocessor>" // # 36 "mvt.c" // #pragma hmppcg tile i:2 // # 12 "<preprocessor>" // # 37 "mvt.c" // for (i = 0 ; i < 4096 ; i++) // { // #pragma hmppcg tile j:2 // # 17 "<preprocessor>" // # 40 "mvt.c" // for (j = 0 ; j < 4096 ; j++) // { // x1[i] = x1[i] + a[i][j] * y1[j]; // } // } // // #pragma hmppcg grid blocksize 32 X 8 // # 26 "<preprocessor>" // # 48 "mvt.c" // for (i = 0 ; i < 4096 ; i++) // { // #pragma hmppcg tile j:2 // # 31 "<preprocessor>" // # 51 "mvt.c" // for (j = 0 ; j < 4096 ; j++) // { // x2[i] = x2[i] + a[j][i] * y2[j]; // } // } // } // // // /* end of extracted source code for directive set "mvt" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # endif #else # if defined(__GNUC__) || defined(__RESTRICT) # 
define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <cuda.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,cudaError_t status) { if(status != cudaSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, cudaGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. 
Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(cudaStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(cudaMalloc(ptr,size)) != 0 ) return -1; #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(cudaStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(cudaFree(ptr)) != 0) return -1; if( CHECK_STATUS(cudaEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static cudaMemcpyKind cudaKind[] = {cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(cudaMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(cudaMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(cudaStream_t wstream) { int status; status = CHECK_STATUS(cudaEventRecord(event, stream_)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(stream_, wevent, 0)); #else return CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(cudaStreamSynchronize(stream_)); } private: 
cudaStream_t stream_; cudaEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return CHECK_STATUS(cudaMemcpyToSymbol(hmpp_constmem,src,size,offset,cudaMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(cudaMemcpyFromSymbol(dst,hmpp_constmem,size,offset,cudaMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(cudaStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(cudaEventRecord(event, 0)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return 
CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { /* stream 0 at the moment */ #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; cudaEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } Element &operator=(const T & value) { if(lazy_) { *((T *)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const 
char*)(*src.device_addr_)) + src.offset_, ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ 
= ElemSize; for(int d=0;d<dim;d++) { sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = * ((T *)host_addr); lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(cudaStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(cudaEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, 
idx9, idxA, idxB }; return at(sizes); } ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && 
(sections[i].step == 1)) lastdense = i; else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: virtual ~UserData(){} UserData(){} }; #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... 
) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } 
#endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float y; }__hmppcg_complex_float; #endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__runMvt_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT x1, float * HMPPCG_RESTRICT y1) { int32_t i_3; int32_t outer_i_2; i_3 = (blockDimX__ * blockIdx.x + threadIdx.x); outer_i_2 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((i_3 <= 1) & (outer_i_2 <= 2047))); if(__hmppcg_guard) { goto __hmppcg_label1; }; { int32_t __hmppcg_end, outer_j_2; for (outer_j_2 = 0, __hmppcg_end = 2047; outer_j_2 <= __hmppcg_end; outer_j_2 += 1) { { int32_t __hmppcg_end, j_3; for (j_3 = 0, __hmppcg_end = 1; j_3 <= __hmppcg_end; j_3 += 1) { x1[i_3 + ((int32_t) (outer_i_2 * 2))] = (x1[i_3 + ((int32_t) (outer_i_2 * 2))]) + ((a[((i_3 + ((int32_t) (outer_i_2 * 2))) * 4096) + (j_3 + ((int32_t) (outer_j_2 * 2)))]) * (y1[j_3 + ((int32_t) (outer_j_2 * 2))])); } } } } __hmppcg_label1:; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> 
__global__ void hmpp_codelet__runMvt_loop1_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT x2, float * HMPPCG_RESTRICT y2) { int32_t i_2; i_2 = (blockDimX__ * blockDimY__ * blockIdx.x + threadIdx.y * blockDimX__ + threadIdx.x); bool __hmppcg_guard = (!(i_2 <= 4095)); if(__hmppcg_guard) { goto __hmppcg_label3; }; { int32_t __hmppcg_end, outer_j_4; for (outer_j_4 = 0, __hmppcg_end = 2047; outer_j_4 <= __hmppcg_end; outer_j_4 += 1) { { int32_t __hmppcg_end, j_4; for (j_4 = 0, __hmppcg_end = 1; j_4 <= __hmppcg_end; j_4 += 1) { x2[i_2] = (x2[i_2]) + ((a[((j_4 + ((int32_t) (outer_j_4 * 2))) * 4096) + i_2]) * (y2[j_4 + ((int32_t) (outer_j_4 * 2))])); } } } } __hmppcg_label3:; } void hmpp_codelet__runMvt( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & x1, Data<float,DefaultPolicy> & x2, Data<float,DefaultPolicy> & y1, Data<float,DefaultPolicy> & y2) { if(1LL) { unsigned int gridDimX__ = 1LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 512LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 4LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 a.makeStreamWait(kernel_stream); x1.makeStreamWait(kernel_stream); y1.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hmpp_codelet__runMvt_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(a.getDeviceAddr(), x1.getDeviceAddr(), y1.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); x1.waitOnEvent(kernel_event); y1.waitOnEvent(kernel_event); #else if 
((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 1LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 a.makeStreamWait(kernel_stream); x2.makeStreamWait(kernel_stream); y2.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hmpp_codelet__runMvt_loop1_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(a.getDeviceAddr(), x2.getDeviceAddr(), y2.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); x2.waitOnEvent(kernel_event); y2.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; Data<float,DefaultPolicy> * __arg3; Data<float,DefaultPolicy> * __arg4; cudaDeviceProp devProp; cudaStream_t kernel_stream; cudaEvent_t kernel_event; 
std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // --------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(cudaStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if CUDA_VERSION >= 3020 if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; __h->__arg3 = NULL; __h->__arg4 = NULL; int device; cudaGetDevice(&device); cudaGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; delete __h->__arg3; delete __h->__arg4; cudaStreamDestroy(__h->kernel_stream); cudaEventDestroy(__h->kernel_event); __h->kernel_stream = 0; for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { __h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT); return 
__h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { __h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT); return __h->__arg3->allocate2(dim, size); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { __h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT); return __h->__arg4->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->download(data,async!=0); } case 
HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->download(data,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->upload(data,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->upload(data,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return 
__h->__arg3->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return 
__h->__arg4->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__runMvt { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // x1@hmpp_codelet__runMvt { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,2): // x2@hmpp_codelet__runMvt { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,3): // y1@hmpp_codelet__runMvt { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(1,4): // y2@hmpp_codelet__runMvt { return __h->__arg4->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__runMvt(__arg0,__arg1,__arg2,__arg3,__arg4) hmpp_codelet__runMvt(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg2), (*__h->__arg3), (*__h->__arg4)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(cudaStreamSynchronize(__h->kernel_stream)); } // 
--------------------------------------------------------------------------- // hmpp_version() // --------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
befb88c580331e2ede48021d2939aa2067b07e37.hip
// !!! This is a file automatically generated by hipify!!!
#include "SpMV_CSR.h"
#include "SpMV_COO.h"
#include "SpMV_ELL.h"
#include "../Helper_Code/timer.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

// Combined absolute/relative tolerance used when comparing CPU and GPU results.
const float eps = 0.00001;

/**
 * Compares the CPU and GPU result vectors element by element and exits on the
 * first mismatch.
 *
 * Fixes over the original version:
 *  - the original computed (cpu - gpu) / cpu, which divides by zero whenever
 *    the reference entry is exactly 0;
 *  - a NaN difference failed both `> eps` and `< -eps`, so NaN results were
 *    silently accepted. The negated `!(diff <= tol)` form rejects NaN too.
 */
void checkIfEqual(float* cpuArray, float* gpuArray, unsigned int N){
    for(unsigned int i = 0; i < N; i++) {
        float diff = fabsf(cpuArray[i] - gpuArray[i]);
        float tol = eps*(1.0f + fabsf(cpuArray[i])); // absolute + relative bound, safe for cpuArray[i] == 0
        if(!(diff <= tol)) { // negated comparison also catches NaN/Inf
            printf("Arrays are not equal (cpuArray[%u] = %e, GPUArray[%u] = %e)\n", i, cpuArray[i], i, gpuArray[i]);
            exit(0);
        }
    }
}

// Reference CPU SpMV for the COO format: outVector = A * inVector.
// Zeroes the output, then scatters each nonzero's contribution to its row.
void SpMV_COO_CPU(const COOMatrix<float>& cooMatrix, const float* inVector, float* outVector){
    for(int i = 0; i < cooMatrix.numRows; i++){
        outVector[i] = 0;
    }
    for(int i = 0; i < cooMatrix.numNonzeros; i++){
        unsigned int row = cooMatrix.rowIdxs[i], col = cooMatrix.colIdxs[i];
        outVector[row] += inVector[col] * cooMatrix.values[i];
    }
}

// Reference CPU SpMV for the CSR format: one dot product per row over the
// nonzero range [rowPtrs[row], rowPtrs[row + 1]).
void SpMV_CSR_CPU(const CSRMatrix<float>& csrMatrix, const float* inVector, float* outVector){
    for(int i = 0; i < csrMatrix.numRows; i++){
        outVector[i] = 0;
    }
    for(int row = 0; row < csrMatrix.numRows; row++){
        float sum = 0;
        for(unsigned int i = csrMatrix.rowPtrs[row]; i < csrMatrix.rowPtrs[row + 1]; i++){
            unsigned int col = csrMatrix.colIdxs[i];
            sum += inVector[col] * csrMatrix.values[i];
        }
        outVector[row] = sum;
    }
}

// Reference CPU SpMV for the ELL format. Storage is column-major within the
// padded matrix: entry i of a row lives at index i * numRows + row.
void SpMV_ELL_CPU(const ELLMatrix<float>& ellMatrix, const float* inVector, float* outVector){
    for(int row = 0; row < ellMatrix.numRows; row++){
        float sum = 0;
        for(int i = 0; i < ellMatrix.nonZeroPerRow[row]; i++){
            unsigned int idx = i * ellMatrix.numRows + row;
            unsigned int col = ellMatrix.colIdxs[idx];
            sum += inVector[col] * ellMatrix.values[idx];
        }
        outVector[row] = sum;
    }
}

// Multiplies a sparse matrix with a vector
// type 1: uses COO, type 2: uses CSR, type 3: uses ELL
// argv: [1] format type, [2] numNonzeros, [3] numRows, [4] numCols.
// Runs the chosen format on CPU and GPU, times both, and verifies agreement.
int main(int argc, char**argv) {
    hipDeviceSynchronize(); // force device initialization before timing anything

    // Allocate memory and initialize data
    Timer timer;
    unsigned int type        = (argc > 1) ? (atoi(argv[1])) : 1;
    unsigned int numNonzeros = (argc > 2) ? (atoi(argv[2])) : 100000;
    unsigned int numRows     = (argc > 3) ? (atoi(argv[3])) : 10000;
    unsigned int numCols     = (argc > 4) ? (atoi(argv[4])) : 10000;
    if (type == 1){
        printf("Running sparse matrix-vector multiplication using the COO format\n");
    } else if (type == 2) {
        printf("Running sparse matrix-vector multiplication using the CSR format\n");
    } else {
        printf("Running sparse matrix-vector multiplication using the ELL format\n");
    }
    float* inVector     = (float*) malloc(numCols*sizeof(float));
    float* outVectorCPU = (float*) malloc(numRows*sizeof(float));
    float* outVectorGPU = (float*) malloc(numRows*sizeof(float));
    for(int i = 0; i < numCols; i++){
        inVector[i] = 1.0f*rand()/RAND_MAX; // uniform in [0, 1]
    }
    COOMatrix<float> cooMatrix(numRows, numCols, numNonzeros, false);
    CSRMatrix<float> csrMatrix(numRows, numCols, numNonzeros, false);
    ELLMatrix<float> ellMatrix(numRows, numCols, numNonzeros, false);
    // Only populate the representation that will actually be used
    if (type == 1) {
        cooMatrix.generateRandomMatrix();
    } else if (type == 2) {
        csrMatrix.generateRandomMatrix();
    } else {
        ellMatrix.generateRandomMatrix();
    }

    // Compute on CPU
    startTime(&timer);
    if (type == 1){
        SpMV_COO_CPU(cooMatrix, inVector, outVectorCPU);
    } else if (type == 2) {
        SpMV_CSR_CPU(csrMatrix, inVector, outVectorCPU);
    } else {
        SpMV_ELL_CPU(ellMatrix, inVector, outVectorCPU);
    }
    stopTime(&timer);
    printElapsedTime(timer, "CPU time", BLUE);

    // Compute on GPU
    startTime(&timer);
    if (type == 1){
        SpMV_COO_GPU<float>(cooMatrix, inVector, outVectorGPU);
    } else if (type == 2) {
        SpMV_CSR_GPU<float>(csrMatrix, inVector, outVectorGPU);
    } else {
        SpMV_ELL_GPU<float>(ellMatrix, inVector, outVectorGPU);
    }
    stopTime(&timer);
    printElapsedTime(timer, "GPU time", RED);

    // Verify result
    checkIfEqual(outVectorCPU, outVectorGPU, numRows);

    // Free memory
    free(inVector);
    free(outVectorCPU);
    free(outVectorGPU);
    return 0;
}
befb88c580331e2ede48021d2939aa2067b07e37.cu
#include "SpMV_CSR.h"
#include "SpMV_COO.h"
#include "SpMV_ELL.h"
#include "../Helper_Code/timer.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

// Combined absolute/relative tolerance used when comparing CPU and GPU results.
const float eps = 0.00001;

/**
 * Compares the CPU and GPU result vectors element by element and exits on the
 * first mismatch.
 *
 * Fixes over the original version:
 *  - the original computed (cpu - gpu) / cpu, which divides by zero whenever
 *    the reference entry is exactly 0;
 *  - a NaN difference failed both `> eps` and `< -eps`, so NaN results were
 *    silently accepted. The negated `!(diff <= tol)` form rejects NaN too.
 */
void checkIfEqual(float* cpuArray, float* gpuArray, unsigned int N){
    for(unsigned int i = 0; i < N; i++) {
        float diff = fabsf(cpuArray[i] - gpuArray[i]);
        float tol = eps*(1.0f + fabsf(cpuArray[i])); // absolute + relative bound, safe for cpuArray[i] == 0
        if(!(diff <= tol)) { // negated comparison also catches NaN/Inf
            printf("Arrays are not equal (cpuArray[%u] = %e, GPUArray[%u] = %e)\n", i, cpuArray[i], i, gpuArray[i]);
            exit(0);
        }
    }
}

// Reference CPU SpMV for the COO format: outVector = A * inVector.
// Zeroes the output, then scatters each nonzero's contribution to its row.
void SpMV_COO_CPU(const COOMatrix<float>& cooMatrix, const float* inVector, float* outVector){
    for(int i = 0; i < cooMatrix.numRows; i++){
        outVector[i] = 0;
    }
    for(int i = 0; i < cooMatrix.numNonzeros; i++){
        unsigned int row = cooMatrix.rowIdxs[i], col = cooMatrix.colIdxs[i];
        outVector[row] += inVector[col] * cooMatrix.values[i];
    }
}

// Reference CPU SpMV for the CSR format: one dot product per row over the
// nonzero range [rowPtrs[row], rowPtrs[row + 1]).
void SpMV_CSR_CPU(const CSRMatrix<float>& csrMatrix, const float* inVector, float* outVector){
    for(int i = 0; i < csrMatrix.numRows; i++){
        outVector[i] = 0;
    }
    for(int row = 0; row < csrMatrix.numRows; row++){
        float sum = 0;
        for(unsigned int i = csrMatrix.rowPtrs[row]; i < csrMatrix.rowPtrs[row + 1]; i++){
            unsigned int col = csrMatrix.colIdxs[i];
            sum += inVector[col] * csrMatrix.values[i];
        }
        outVector[row] = sum;
    }
}

// Reference CPU SpMV for the ELL format. Storage is column-major within the
// padded matrix: entry i of a row lives at index i * numRows + row.
void SpMV_ELL_CPU(const ELLMatrix<float>& ellMatrix, const float* inVector, float* outVector){
    for(int row = 0; row < ellMatrix.numRows; row++){
        float sum = 0;
        for(int i = 0; i < ellMatrix.nonZeroPerRow[row]; i++){
            unsigned int idx = i * ellMatrix.numRows + row;
            unsigned int col = ellMatrix.colIdxs[idx];
            sum += inVector[col] * ellMatrix.values[idx];
        }
        outVector[row] = sum;
    }
}

// Multiplies a sparse matrix with a vector
// type 1: uses COO, type 2: uses CSR, type 3: uses ELL
// argv: [1] format type, [2] numNonzeros, [3] numRows, [4] numCols.
// Runs the chosen format on CPU and GPU, times both, and verifies agreement.
int main(int argc, char**argv) {
    cudaDeviceSynchronize(); // force device initialization before timing anything

    // Allocate memory and initialize data
    Timer timer;
    unsigned int type        = (argc > 1) ? (atoi(argv[1])) : 1;
    unsigned int numNonzeros = (argc > 2) ? (atoi(argv[2])) : 100000;
    unsigned int numRows     = (argc > 3) ? (atoi(argv[3])) : 10000;
    unsigned int numCols     = (argc > 4) ? (atoi(argv[4])) : 10000;
    if (type == 1){
        printf("Running sparse matrix-vector multiplication using the COO format\n");
    } else if (type == 2) {
        printf("Running sparse matrix-vector multiplication using the CSR format\n");
    } else {
        printf("Running sparse matrix-vector multiplication using the ELL format\n");
    }
    float* inVector     = (float*) malloc(numCols*sizeof(float));
    float* outVectorCPU = (float*) malloc(numRows*sizeof(float));
    float* outVectorGPU = (float*) malloc(numRows*sizeof(float));
    for(int i = 0; i < numCols; i++){
        inVector[i] = 1.0f*rand()/RAND_MAX; // uniform in [0, 1]
    }
    COOMatrix<float> cooMatrix(numRows, numCols, numNonzeros, false);
    CSRMatrix<float> csrMatrix(numRows, numCols, numNonzeros, false);
    ELLMatrix<float> ellMatrix(numRows, numCols, numNonzeros, false);
    // Only populate the representation that will actually be used
    if (type == 1) {
        cooMatrix.generateRandomMatrix();
    } else if (type == 2) {
        csrMatrix.generateRandomMatrix();
    } else {
        ellMatrix.generateRandomMatrix();
    }

    // Compute on CPU
    startTime(&timer);
    if (type == 1){
        SpMV_COO_CPU(cooMatrix, inVector, outVectorCPU);
    } else if (type == 2) {
        SpMV_CSR_CPU(csrMatrix, inVector, outVectorCPU);
    } else {
        SpMV_ELL_CPU(ellMatrix, inVector, outVectorCPU);
    }
    stopTime(&timer);
    printElapsedTime(timer, "CPU time", BLUE);

    // Compute on GPU
    startTime(&timer);
    if (type == 1){
        SpMV_COO_GPU<float>(cooMatrix, inVector, outVectorGPU);
    } else if (type == 2) {
        SpMV_CSR_GPU<float>(csrMatrix, inVector, outVectorGPU);
    } else {
        SpMV_ELL_GPU<float>(ellMatrix, inVector, outVectorGPU);
    }
    stopTime(&timer);
    printElapsedTime(timer, "GPU time", RED);

    // Verify result
    checkIfEqual(outVectorCPU, outVectorGPU, numRows);

    // Free memory
    free(inVector);
    free(outVectorCPU);
    free(outVectorGPU);
    return 0;
}
952faaca490cc7c7764feba7757600d097ae9f1b.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Pierre and Marie Curie University
 * Neutron transport computation
 * (originally labelled the sequential version; this is the GPU port)
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

// File receiving the positions of the absorbed neutrons.
#define OUTPUT_FILE "/tmp/absorbed.dat"
// Launch configuration: NB_BLOCK blocks of NB_THREAD threads each.
#define NB_BLOCK 256
#define NB_THREAD 256
// Error-check wrapper for HIP calls.
// NOTE(review): it does `return EXIT_FAILURE`, so it is only usable inside
// functions returning int (main, here).
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
  printf("Error at %s:%d\n",__FILE__,__LINE__); \
  return EXIT_FAILURE;}} while(0)

// Declared in the GPU's global memory: overall tallies of reflected (r),
// absorbed (b) and transmitted (t) neutrons. As uninitialized __device__
// variables they have static storage and start at zero.
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
// Next free slot in the absorbed-position result array (bumped with atomicAdd).
__device__ int j=0;

// Usage banner printed when the program is run without arguments.
// (Runtime string kept byte-for-byte; the missing accents come from the source.)
char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
H : paisseur de la plaque\n\
Nb : nombre d'chantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";

// Initializes one hiprand generator state per thread of the launch.
__global__ void setup_kernel(hiprandState_t *state){
  int idx = threadIdx.x+blockDim.x*blockIdx.x;
  // Initialize each generator with a different seed (its global thread id).
  hiprand_init(idx, 0, 0, &state[idx]);
  /* Alternative: initialize every generator with the same seed but a
     different sequence number. The generators still would not produce the
     same numbers, because consecutive sequences are 2^67 draws apart. */
  // hiprand_init(1, idx, 0, &state[idx]);
}

/*
 * our own gettimeofday() — wall-clock time in seconds as a double
 * (defined but unused in this file; kept for parity with the CPU version)
 */
double my_gettimeofday(){
  struct timeval tmp_time;
  gettimeofday(&tmp_time, NULL);
  return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}

// Monte-Carlo neutron transport kernel.
// Each thread simulates neutrons idx, idx+stride, ... (grid-stride loop over
// the n samples), tallies reflected/absorbed/transmitted counts locally,
// reduces them per block in shared memory, then thread 0 of each block
// publishes the block totals with one atomicAdd per counter. Positions of
// absorbed neutrons are appended to `result` via the global atomic index `j`.
// Expects a 1-D launch of NB_THREAD threads per block (shared arrays are
// sized NB_THREAD) with blockDim.x a power of two for the tree reduction.
__global__ void neutron_gpu(hiprandState_t *state, float h, int n, float c_c, float c_s, float *result) {
  // per-thread counts of reflected, absorbed and transmitted neutrons
  int r, b, t;
  r = b = t = 0;
  int j_loc;
  // Per-block scratch arrays, one slot per thread, for the reduction.
  __shared__ int R[NB_THREAD];
  __shared__ int B[NB_THREAD];
  __shared__ int T[NB_THREAD];
  float c;
  c = c_c + c_s;
  // distance travelled by the neutron before the collision
  float L;
  // direction of the neutron (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // position of the particle (0 <= x <= h)
  float x;
  int idx;
  idx = threadIdx.x + blockIdx.x * blockDim.x;
  // Copy the generator state into registers for efficiency.
  hiprandState_t localState = state[idx];
  /* GPU code */
  while(idx < n) {
    d = 0.0;
    x = 0.0;
    while(1) {
      u = hiprand_uniform(&localState);
      // NOTE(review): log() and cos() are double-precision calls on float
      // data — presumably logf()/cosf() were intended; confirm.
      L = -(1 / c) * log(u);
      x = x + L * cos(d);
      if (x < 0) {
        // left through the front face: reflected
        r++;
        break;
      } else if (x >= h) {
        // left through the back face: transmitted
        t++;
        break;
      } else if ((u = hiprand_uniform(&localState)) < c_c / c) {
        // absorbed: record its position in the shared output array
        b++;
        j_loc = atomicAdd(&j,1);
        result[j_loc] = x;
        break;
      } else {
        // scattered: draw a new direction and continue the walk
        u = hiprand_uniform(&localState);
        d = u * M_PI;
      }
    }
    idx+= blockDim.x * gridDim.x;
  }
  // Store r, b, t into the per-block arrays.
  R[threadIdx.x] = r;
  B[threadIdx.x] = b;
  T[threadIdx.x] = t;
  // Synchronize before any thread computes the block total.
  __syncthreads();
  // Tree reduction of the three arrays.
  for(unsigned int s = blockDim.x/2; s > 0; s = s/2) {
    if(threadIdx.x < s) {
      R[threadIdx.x] += R[threadIdx.x + s];
      B[threadIdx.x] += B[threadIdx.x + s];
      T[threadIdx.x] += T[threadIdx.x + s];
    }
    __syncthreads();
  }
  // Only thread 0 of each block adds the block totals to the global tallies.
  if(threadIdx.x == 0) {
    atomicAdd(&device_r,R[0]);
    atomicAdd(&device_b,B[0]);
    atomicAdd(&device_t,T[0]);
  }
}

/*
 * main() — parses parameters, runs the simulation on the GPU, prints the
 * reflected/absorbed/transmitted fractions and writes the absorbed positions
 * to OUTPUT_FILE.
 */
int main(int argc, char *argv[]) {
  // The mean distance between neutron/atom interactions is 1/c.
  // c_c and c_s are the absorbing and scattering components of c.
  float c_c, c_s;
  // thickness of the plate
  float h;
  // number of samples
  int n;
  // timing
  hipEvent_t start, finish;
  hipEventCreate(&start);
  hipEventCreate(&finish);
  if( argc == 1)
    fprintf( stderr, "%s\n", info);
  // default values
  h = 1.0;
  n = 500000000;
  c_c = 0.5;
  c_s = 0.5;
  // read the parameters
  if (argc > 1)
    h = atof(argv[1]);
  if (argc > 2)
    n = atoi(argv[2]);
  if (argc > 3)
    c_c = atof(argv[3]);
  if (argc > 4)
    c_s = atof(argv[4]);
  // echo the parameters for verification
  printf("paisseur de la plaque : %4.g\n", h);
  printf("Nombre d'chantillons : %d\n", n);
  printf("C_c : %g\n", c_c);
  printf("C_s : %g\n", c_s);
  // Host-side allocation of the result array.
  // NOTE(review): the calloc result is unchecked; with the default
  // n = 500000000 this is a ~2 GB allocation — confirm failure handling.
  float *host_absorbed;
  host_absorbed = (float *) calloc(n, sizeof(float));
  int r,b,t;
  // Device-side allocation of the result array.
  // NOTE(review): hipMalloc/hipMemset are unchecked here, unlike the
  // CUDA_CALL-wrapped allocation below.
  float *device_absorbed;
  hipMalloc((void **)&device_absorbed, n*sizeof(float));
  hipMemset(device_absorbed,0,n*sizeof(float));
  // Host-driven allocation of the array of pseudo-random generator states.
  hiprandState_t *d_state;
  CUDA_CALL(hipMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(hiprandState_t)));
  // start timing
  hipEventRecord(start, 0);
  // initialize the generators, then run the simulation
  hipLaunchKernelGGL(( setup_kernel), dim3(NB_BLOCK),dim3(NB_THREAD), 0, 0, d_state);
  hipLaunchKernelGGL(( neutron_gpu), dim3(NB_BLOCK),dim3(NB_THREAD), 0, 0, d_state, h, n, c_c, c_s, device_absorbed);
  // The blocking hipMemcpy below also synchronizes with the kernels above.
  hipMemcpy(host_absorbed,device_absorbed,n*sizeof(float),hipMemcpyDeviceToHost);
  // NOTE(review): HIP usually expects HIP_SYMBOL(device_r) for the symbol
  // argument — confirm this hipify output compiles with the toolchain in use.
  hipMemcpyFromSymbol(&r, device_r, sizeof(int),0);
  hipMemcpyFromSymbol(&b, device_b, sizeof(int),0);
  hipMemcpyFromSymbol(&t, device_t, sizeof(int),0);
  // stop timing
  hipEventRecord(finish, 0);
  hipEventSynchronize(finish);
  float elapsedTime;
  hipEventElapsedTime(&elapsedTime, start, finish); // milliseconds
  printf("r=%d, b=%d, t=%d\n",r,b,t);
  printf("\nPourcentage des neutrons reflchis : %4.2g\n", (float) r / (float) n);
  printf("Pourcentage des neutrons absorbs : %4.2g\n", (float) b / (float) n);
  printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
  printf("\nTemps total de calcul: %.8g sec\n", elapsedTime/1000.0);
  printf("Millions de neutrons /s: %.2g\n", (double) n / ((elapsedTime/1000.0)*1e6));
  // open the file to write the positions of the absorbed neutrons
  FILE *f_handle = fopen(OUTPUT_FILE, "w");
  if (!f_handle) {
    fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
    exit(EXIT_FAILURE);
  }
  for (int j = 0; j < b; j++)
    fprintf(f_handle, "%f\n", host_absorbed[j]);
  // close the file
  fclose(f_handle);
  printf("Result written in " OUTPUT_FILE "\n");
  hipFree(d_state);
  hipFree(device_absorbed);
  free(host_absorbed);
  return EXIT_SUCCESS;
}
952faaca490cc7c7764feba7757600d097ae9f1b.cu
/*
 * Pierre and Marie Curie University
 * Neutron transport computation
 * (originally labelled the sequential version; this is the GPU port)
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

// File receiving the positions of the absorbed neutrons.
#define OUTPUT_FILE "/tmp/absorbed.dat"
// Launch configuration: NB_BLOCK blocks of NB_THREAD threads each.
#define NB_BLOCK 256
#define NB_THREAD 256
// Error-check wrapper for CUDA calls.
// NOTE(review): it does `return EXIT_FAILURE`, so it is only usable inside
// functions returning int (main, here).
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
  printf("Error at %s:%d\n",__FILE__,__LINE__); \
  return EXIT_FAILURE;}} while(0)

// Declared in the GPU's global memory: overall tallies of reflected (r),
// absorbed (b) and transmitted (t) neutrons. As uninitialized __device__
// variables they have static storage and start at zero.
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
// Next free slot in the absorbed-position result array (bumped with atomicAdd).
__device__ int j=0;

// Usage banner printed when the program is run without arguments
// (runtime string kept byte-for-byte).
char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
H : épaisseur de la plaque\n\
Nb : nombre d'échantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";

// Initializes one curand generator state per thread of the launch.
__global__ void setup_kernel(curandState *state){
  int idx = threadIdx.x+blockDim.x*blockIdx.x;
  // Initialize each generator with a different seed (its global thread id).
  curand_init(idx, 0, 0, &state[idx]);
  /* Alternative: initialize every generator with the same seed but a
     different sequence number. The generators still would not produce the
     same numbers, because consecutive sequences are 2^67 draws apart. */
  // curand_init(1, idx, 0, &state[idx]);
}

/*
 * our own gettimeofday() — wall-clock time in seconds as a double
 * (defined but unused in this file; kept for parity with the CPU version)
 */
double my_gettimeofday(){
  struct timeval tmp_time;
  gettimeofday(&tmp_time, NULL);
  return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}

// Monte-Carlo neutron transport kernel.
// Each thread simulates neutrons idx, idx+stride, ... (grid-stride loop over
// the n samples), tallies reflected/absorbed/transmitted counts locally,
// reduces them per block in shared memory, then thread 0 of each block
// publishes the block totals with one atomicAdd per counter. Positions of
// absorbed neutrons are appended to `result` via the global atomic index `j`.
// Expects a 1-D launch of NB_THREAD threads per block (shared arrays are
// sized NB_THREAD) with blockDim.x a power of two for the tree reduction.
__global__ void neutron_gpu(curandState *state, float h, int n, float c_c, float c_s, float *result) {
  // per-thread counts of reflected, absorbed and transmitted neutrons
  int r, b, t;
  r = b = t = 0;
  int j_loc;
  // Per-block scratch arrays, one slot per thread, for the reduction.
  __shared__ int R[NB_THREAD];
  __shared__ int B[NB_THREAD];
  __shared__ int T[NB_THREAD];
  float c;
  c = c_c + c_s;
  // distance travelled by the neutron before the collision
  float L;
  // direction of the neutron (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // position of the particle (0 <= x <= h)
  float x;
  int idx;
  idx = threadIdx.x + blockIdx.x * blockDim.x;
  // Copy the generator state into registers for efficiency.
  curandState localState = state[idx];
  /* GPU code */
  while(idx < n) {
    d = 0.0;
    x = 0.0;
    while(1) {
      u = curand_uniform(&localState);
      // NOTE(review): log() and cos() are double-precision calls on float
      // data — presumably logf()/cosf() were intended; confirm.
      L = -(1 / c) * log(u);
      x = x + L * cos(d);
      if (x < 0) {
        // left through the front face: reflected
        r++;
        break;
      } else if (x >= h) {
        // left through the back face: transmitted
        t++;
        break;
      } else if ((u = curand_uniform(&localState)) < c_c / c) {
        // absorbed: record its position in the shared output array
        b++;
        j_loc = atomicAdd(&j,1);
        result[j_loc] = x;
        break;
      } else {
        // scattered: draw a new direction and continue the walk
        u = curand_uniform(&localState);
        d = u * M_PI;
      }
    }
    idx+= blockDim.x * gridDim.x;
  }
  // Store r, b, t into the per-block arrays.
  R[threadIdx.x] = r;
  B[threadIdx.x] = b;
  T[threadIdx.x] = t;
  // Synchronize before any thread computes the block total.
  __syncthreads();
  // Tree reduction of the three arrays.
  for(unsigned int s = blockDim.x/2; s > 0; s = s/2) {
    if(threadIdx.x < s) {
      R[threadIdx.x] += R[threadIdx.x + s];
      B[threadIdx.x] += B[threadIdx.x + s];
      T[threadIdx.x] += T[threadIdx.x + s];
    }
    __syncthreads();
  }
  // Only thread 0 of each block adds the block totals to the global tallies.
  if(threadIdx.x == 0) {
    atomicAdd(&device_r,R[0]);
    atomicAdd(&device_b,B[0]);
    atomicAdd(&device_t,T[0]);
  }
}

/*
 * main() — parses parameters, runs the simulation on the GPU, prints the
 * reflected/absorbed/transmitted fractions and writes the absorbed positions
 * to OUTPUT_FILE.
 */
int main(int argc, char *argv[]) {
  // The mean distance between neutron/atom interactions is 1/c.
  // c_c and c_s are the absorbing and scattering components of c.
  float c_c, c_s;
  // thickness of the plate
  float h;
  // number of samples
  int n;
  // timing
  cudaEvent_t start, finish;
  cudaEventCreate(&start);
  cudaEventCreate(&finish);
  if( argc == 1)
    fprintf( stderr, "%s\n", info);
  // default values
  h = 1.0;
  n = 500000000;
  c_c = 0.5;
  c_s = 0.5;
  // read the parameters
  if (argc > 1)
    h = atof(argv[1]);
  if (argc > 2)
    n = atoi(argv[2]);
  if (argc > 3)
    c_c = atof(argv[3]);
  if (argc > 4)
    c_s = atof(argv[4]);
  // echo the parameters for verification
  printf("Épaisseur de la plaque : %4.g\n", h);
  printf("Nombre d'échantillons : %d\n", n);
  printf("C_c : %g\n", c_c);
  printf("C_s : %g\n", c_s);
  // Host-side allocation of the result array.
  // NOTE(review): the calloc result is unchecked; with the default
  // n = 500000000 this is a ~2 GB allocation — confirm failure handling.
  float *host_absorbed;
  host_absorbed = (float *) calloc(n, sizeof(float));
  int r,b,t;
  // Device-side allocation of the result array.
  // NOTE(review): cudaMalloc/cudaMemset are unchecked here, unlike the
  // CUDA_CALL-wrapped allocation below.
  float *device_absorbed;
  cudaMalloc((void **)&device_absorbed, n*sizeof(float));
  cudaMemset(device_absorbed,0,n*sizeof(float));
  // Host-driven allocation of the array of pseudo-random generator states.
  curandState *d_state;
  CUDA_CALL(cudaMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(curandState)));
  // start timing
  cudaEventRecord(start, 0);
  // initialize the generators, then run the simulation
  setup_kernel<<<NB_BLOCK,NB_THREAD>>>(d_state);
  neutron_gpu<<<NB_BLOCK,NB_THREAD>>>(d_state, h, n, c_c, c_s, device_absorbed);
  // The blocking cudaMemcpy below also synchronizes with the kernels above.
  cudaMemcpy(host_absorbed,device_absorbed,n*sizeof(float),cudaMemcpyDeviceToHost);
  cudaMemcpyFromSymbol(&r, device_r, sizeof(int),0);
  cudaMemcpyFromSymbol(&b, device_b, sizeof(int),0);
  cudaMemcpyFromSymbol(&t, device_t, sizeof(int),0);
  // stop timing
  cudaEventRecord(finish, 0);
  cudaEventSynchronize(finish);
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime, start, finish); // milliseconds
  printf("r=%d, b=%d, t=%d\n",r,b,t);
  printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) r / (float) n);
  printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) b / (float) n);
  printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
  printf("\nTemps total de calcul: %.8g sec\n", elapsedTime/1000.0);
  printf("Millions de neutrons /s: %.2g\n", (double) n / ((elapsedTime/1000.0)*1e6));
  // open the file to write the positions of the absorbed neutrons
  FILE *f_handle = fopen(OUTPUT_FILE, "w");
  if (!f_handle) {
    fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
    exit(EXIT_FAILURE);
  }
  for (int j = 0; j < b; j++)
    fprintf(f_handle, "%f\n", host_absorbed[j]);
  // close the file
  fclose(f_handle);
  printf("Result written in " OUTPUT_FILE "\n");
  cudaFree(d_state);
  cudaFree(device_absorbed);
  free(host_absorbed);
  return EXIT_SUCCESS;
}
cd885e554f3f2b96e0ff4813cf4372fbf358c62a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudamat_kernels.cuh" #include "float.h" /* ------------------------- Random number generation ------------------------- */ __global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // The initial x is the seed and the initial carry is 1 unsigned long long rndWord = ((unsigned long long)seed << 32) + 1; const unsigned int rndMult = rndMults[idx]; /* * Run the chain for a few steps so that all the streams have a chance * to differentiate. They start out generating similar random numbers * because all the multipliers are similar. */ for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); } rndWords[idx] = rndWord; } __global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; } if (idx < numElements) rndWords[idx] = rndWord; } __global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; float rnd1, rnd2, R, T; for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd2 = 
(__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; T = 2 * PI * rnd2; R = sqrtf(-2 * __logf(rnd1)); gData[i] = R * __cosf(T); if (i + NUM_RND_STREAMS < numElements) gData[i + NUM_RND_STREAMS] = R * __sinf(T); } if (idx < numElements) rndWords[idx] = rndWord; } /* ------------------------- Data copying ------------------------- */ /* Copy row slice from source to target. There is a block for every 32x32 chunk being copied. */ __global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? start_col + 32: width; const int target_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * target_height + row - start] = source[cur_col * height + row]; } } __global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? 
start_col + 32: width; const int source_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * height + row] = source[cur_col * source_height + row - start]; //source[cur_col * height + row - start] = target[cur_col * target_height + row]; } } __global__ void kTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } /* ------------------------- Mathematical operations ------------------------- */ __global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] < mat2[i]; } } __global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] < val; } } __global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = 
idx; i < len; i += numThreads) { target[i] = mat1[i] > mat2[i]; } } __global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] > val; } } __global__ void kEquals(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] == mat2[i]; } } __global__ void kEqualsScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] == val; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; float cur_max = -FLT_MAX; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max) cur_max = max_vals[i]; target[blockIdx.x] = cur_max; } } __global__ void kSign(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] ? 
copysignf(1., mat[i]) : 0.; } } __global__ void kApplySigmoid(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = 1 / (1 + __expf(-mat[i])); } } __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } } __global__ void kApplyAbs(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0)); } } __global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (__logf(1 + __expf(-mat_i)) + mat_i); else target[i] = __logf(1 + __expf(mat_i)); } } __global__ void kLog(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __logf(mat[i]); } } __global__ void kExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __expf(mat[i]); } } __global__ void kSqrt(float* mat, float* target, unsigned int len) { 
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } } __global__ void kPow(float* mat, float pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow); } } __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow[i]); } } __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. 
/ mat[i]; } __global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i % height]; } } __global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i / height]; } } __global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + mult * vec[i % height]; } } __global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i % height]; } } __global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i / height]; } } __global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = 
a[i] + b[i]; } } __global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } } __global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] / b[i]; } } __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } } __global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha * mat[i]; } } __global__ void kAssignScalar(float* dest, float alpha, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha; } } __global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = mat[i] / alpha; } } __global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] 
= a[i] + alpha; } } __global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ __shared__ int sourceRowIndices[32]; const int startTargetRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startTargetRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); if (sourceRowIndices[tid]<0) sourceRowIndices[tid] += nSourceRows; if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) sourceRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){ __shared__ int targetRowIndices[32]; const int startSourceRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startSourceRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ targetRowIndices[tid] = int(indices[startSourceRowI + tid]); if (targetRowIndices[tid]<0) targetRowIndices[tid] += nTargetRows; if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows) targetRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } }
cd885e554f3f2b96e0ff4813cf4372fbf358c62a.cu
#include "cudamat_kernels.cuh" #include "float.h" /* ------------------------- Random number generation ------------------------- */ __global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // The initial x is the seed and the initial carry is 1 unsigned long long rndWord = ((unsigned long long)seed << 32) + 1; const unsigned int rndMult = rndMults[idx]; /* * Run the chain for a few steps so that all the streams have a chance * to differentiate. They start out generating similar random numbers * because all the multipliers are similar. */ for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); } rndWords[idx] = rndWord; } __global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; } if (idx < numElements) rndWords[idx] = rndWord; } __global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; float rnd1, rnd2, R, T; for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; T = 2 * PI * rnd2; R = sqrtf(-2 * 
__logf(rnd1)); gData[i] = R * __cosf(T); if (i + NUM_RND_STREAMS < numElements) gData[i + NUM_RND_STREAMS] = R * __sinf(T); } if (idx < numElements) rndWords[idx] = rndWord; } /* ------------------------- Data copying ------------------------- */ /* Copy row slice from source to target. There is a block for every 32x32 chunk being copied. */ __global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? start_col + 32: width; const int target_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * target_height + row - start] = source[cur_col * height + row]; } } __global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? 
start_col + 32: width; const int source_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * height + row] = source[cur_col * source_height + row - start]; //source[cur_col * height + row - start] = target[cur_col * target_height + row]; } } __global__ void kTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } /* ------------------------- Mathematical operations ------------------------- */ __global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] < mat2[i]; } } __global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] < val; } } __global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = 
idx; i < len; i += numThreads) { target[i] = mat1[i] > mat2[i]; } } __global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] > val; } } __global__ void kEquals(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] == mat2[i]; } } __global__ void kEqualsScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] == val; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; float cur_max = -FLT_MAX; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max) cur_max = max_vals[i]; target[blockIdx.x] = cur_max; } } __global__ void kSign(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] ? 
copysignf(1., mat[i]) : 0.; } } __global__ void kApplySigmoid(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = 1 / (1 + __expf(-mat[i])); } } __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } } __global__ void kApplyAbs(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0)); } } __global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (__logf(1 + __expf(-mat_i)) + mat_i); else target[i] = __logf(1 + __expf(mat_i)); } } __global__ void kLog(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __logf(mat[i]); } } __global__ void kExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __expf(mat[i]); } } __global__ void kSqrt(float* mat, float* target, unsigned int len) { 
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } } __global__ void kPow(float* mat, float pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow); } } __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow[i]); } } __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. 
/ mat[i]; } __global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i % height]; } } __global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i / height]; } } __global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + mult * vec[i % height]; } } __global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i % height]; } } __global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i / height]; } } __global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = 
a[i] + b[i]; } } __global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } } __global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] / b[i]; } } __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } } __global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha * mat[i]; } } __global__ void kAssignScalar(float* dest, float alpha, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha; } } __global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = mat[i] / alpha; } } __global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] 
= a[i] + alpha; } } __global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ __shared__ int sourceRowIndices[32]; const int startTargetRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startTargetRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); if (sourceRowIndices[tid]<0) sourceRowIndices[tid] += nSourceRows; if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) sourceRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){ __shared__ int targetRowIndices[32]; const int startSourceRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startSourceRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ targetRowIndices[tid] = int(indices[startSourceRowI + tid]); if (targetRowIndices[tid]<0) targetRowIndices[tid] += nTargetRows; if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows) targetRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } }
3ba842decea5d128e39992335847725f685c2fd6.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include "TH/THHalf.h" #include "THHHalfAutoNumerics.cuh" #include "generic/VolumetricFullConvolution.cu" #include "THHGenerateFloatTypes.h"
3ba842decea5d128e39992335847725f685c2fd6.cu
#include "THCUNN.h" #include "common.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include "generic/VolumetricFullConvolution.cu" #include "THCGenerateFloatTypes.h"
011ab589bebddda31c1c23f855e996ad86d15e84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Modified from nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chTimer.h" __global__ void NullKernel() { } int main( int argc, char *argv[] ) { //const int cIterations = 1000000; const int cIterations = 1000000; printf( "Measuring asynchronous launch time... 
" ); fflush( stdout ); chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(1), 0, 0, ); } hipDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "%.2f us\n", usPerLaunch ); } return 0; }
011ab589bebddda31c1c23f855e996ad86d15e84.cu
/* * * Modified from nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chTimer.h" __global__ void NullKernel() { } int main( int argc, char *argv[] ) { //const int cIterations = 1000000; const int cIterations = 1000000; printf( "Measuring asynchronous launch time... 
" ); fflush( stdout ); chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<1,1>>>(); } cudaThreadSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "%.2f us\n", usPerLaunch ); } return 0; }
d0e909aba6e9f2d3fa14b0792e606bb77e33fb87.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "PreInitialize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; hipMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; hipMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; hipMalloc(&origin, XSIZE*YSIZE); int *mis = NULL; hipMalloc(&mis, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( PreInitialize), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( PreInitialize), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( PreInitialize), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis); } auto end = 
steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d0e909aba6e9f2d3fa14b0792e606bb77e33fb87.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "PreInitialize.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; cudaMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; cudaMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; cudaMalloc(&origin, XSIZE*YSIZE); int *mis = NULL; cudaMalloc(&mis, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); PreInitialize<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { PreInitialize<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { PreInitialize<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8e3ea8f28265f53a2d803643bf50cfc40a29b1fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
8e3ea8f28265f53a2d803643bf50cfc40a29b1fc.cu
#include "includes.h" __global__ void sec_min_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float min_val = 1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] < min_val){ min_val = inp[i * C + plane]; } } out[p_id * C + plane] = min_val; } } }
db7fd5655583a3af7a4570f8bba9d0115b92ad39.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __HIPCC__ # error "A C or C++ compiler has been selected for CUDA" #endif /* Version number components: V=Version, R=Revision, P=Patch Version date components: YYYY=Year, MM=Month, DD=Day */ #if defined(__NVCC__) # define COMPILER_ID "NVIDIA" # if defined(_MSC_VER) # define SIMULATE_ID "MSVC" # endif # if defined(__CUDACC_VER_MAJOR__) # define COMPILER_VERSION_MAJOR DEC(__CUDACC_VER_MAJOR__) # define COMPILER_VERSION_MINOR DEC(__CUDACC_VER_MINOR__) # define COMPILER_VERSION_PATCH DEC(__CUDACC_VER_BUILD__) # endif # if defined(_MSC_VER) /* _MSC_VER = VVRR */ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) # endif /* These compilers are either not known or too old to define an identification macro. Try to identify the platform and guess that it is the native compiler. */ #elif defined(__sgi) # define COMPILER_ID "MIPSpro" #elif defined(__hpux) || defined(__hpua) # define COMPILER_ID "HP" #else /* unknown compiler */ # define COMPILER_ID "" #endif /* Construct the string literal in pieces to prevent the source from getting matched. Store it in a pointer rather than an array because some compilers will just produce instructions to fill the array rather than assigning a pointer to a static array. */ char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; #ifdef SIMULATE_ID char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; #endif #define STRINGIFY_HELPER(X) #X #define STRINGIFY(X) STRINGIFY_HELPER(X) /* Identify known platforms by name. 
*/ #if defined(__linux) || defined(__linux__) || defined(linux) # define PLATFORM_ID "Linux" #elif defined(__CYGWIN__) # define PLATFORM_ID "Cygwin" #elif defined(__MINGW32__) # define PLATFORM_ID "MinGW" #elif defined(__APPLE__) # define PLATFORM_ID "Darwin" #elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) # define PLATFORM_ID "Windows" #elif defined(__FreeBSD__) || defined(__FreeBSD) # define PLATFORM_ID "FreeBSD" #elif defined(__NetBSD__) || defined(__NetBSD) # define PLATFORM_ID "NetBSD" #elif defined(__OpenBSD__) || defined(__OPENBSD) # define PLATFORM_ID "OpenBSD" #elif defined(__sun) || defined(sun) # define PLATFORM_ID "SunOS" #elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) # define PLATFORM_ID "AIX" #elif defined(__sgi) || defined(__sgi__) || defined(_SGI) # define PLATFORM_ID "IRIX" #elif defined(__hpux) || defined(__hpux__) # define PLATFORM_ID "HP-UX" #elif defined(__HAIKU__) # define PLATFORM_ID "Haiku" #elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) # define PLATFORM_ID "BeOS" #elif defined(__QNX__) || defined(__QNXNTO__) # define PLATFORM_ID "QNX" #elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) # define PLATFORM_ID "Tru64" #elif defined(__riscos) || defined(__riscos__) # define PLATFORM_ID "RISCos" #elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) # define PLATFORM_ID "SINIX" #elif defined(__UNIX_SV__) # define PLATFORM_ID "UNIX_SV" #elif defined(__bsdos__) # define PLATFORM_ID "BSDOS" #elif defined(_MPRAS) || defined(MPRAS) # define PLATFORM_ID "MP-RAS" #elif defined(__osf) || defined(__osf__) # define PLATFORM_ID "OSF1" #elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) # define PLATFORM_ID "SCO_SV" #elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) # define PLATFORM_ID "ULTRIX" #elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) # define PLATFORM_ID "Xenix" #elif defined(__WATCOMC__) # if defined(__LINUX__) 
# define PLATFORM_ID "Linux" # elif defined(__DOS__) # define PLATFORM_ID "DOS" # elif defined(__OS2__) # define PLATFORM_ID "OS2" # elif defined(__WINDOWS__) # define PLATFORM_ID "Windows3x" # else /* unknown platform */ # define PLATFORM_ID # endif #else /* unknown platform */ # define PLATFORM_ID #endif /* For windows compilers MSVC and Intel we can determine the architecture of the compiler being used. This is because the compilers do not have flags that can change the architecture, but rather depend on which compiler is being used */ #if defined(_WIN32) && defined(_MSC_VER) # if defined(_M_IA64) # define ARCHITECTURE_ID "IA64" # elif defined(_M_X64) || defined(_M_AMD64) # define ARCHITECTURE_ID "x64" # elif defined(_M_IX86) # define ARCHITECTURE_ID "X86" # elif defined(_M_ARM64) # define ARCHITECTURE_ID "ARM64" # elif defined(_M_ARM) # if _M_ARM == 4 # define ARCHITECTURE_ID "ARMV4I" # elif _M_ARM == 5 # define ARCHITECTURE_ID "ARMV5I" # else # define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) # endif # elif defined(_M_MIPS) # define ARCHITECTURE_ID "MIPS" # elif defined(_M_SH) # define ARCHITECTURE_ID "SHx" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #elif defined(__WATCOMC__) # if defined(_M_I86) # define ARCHITECTURE_ID "I86" # elif defined(_M_IX86) # define ARCHITECTURE_ID "X86" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) # if defined(__ICCARM__) # define ARCHITECTURE_ID "ARM" # elif defined(__ICCAVR__) # define ARCHITECTURE_ID "AVR" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #else # define ARCHITECTURE_ID #endif /* Convert integer to decimal digit literals. 
*/ #define DEC(n) \ ('0' + (((n) / 10000000)%10)), \ ('0' + (((n) / 1000000)%10)), \ ('0' + (((n) / 100000)%10)), \ ('0' + (((n) / 10000)%10)), \ ('0' + (((n) / 1000)%10)), \ ('0' + (((n) / 100)%10)), \ ('0' + (((n) / 10)%10)), \ ('0' + ((n) % 10)) /* Convert integer to hex digit literals. */ #define HEX(n) \ ('0' + ((n)>>28 & 0xF)), \ ('0' + ((n)>>24 & 0xF)), \ ('0' + ((n)>>20 & 0xF)), \ ('0' + ((n)>>16 & 0xF)), \ ('0' + ((n)>>12 & 0xF)), \ ('0' + ((n)>>8 & 0xF)), \ ('0' + ((n)>>4 & 0xF)), \ ('0' + ((n) & 0xF)) /* Construct a string literal encoding the version number components. */ #ifdef COMPILER_VERSION_MAJOR char const info_version[] = { 'I', 'N', 'F', 'O', ':', 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', COMPILER_VERSION_MAJOR, # ifdef COMPILER_VERSION_MINOR '.', COMPILER_VERSION_MINOR, # ifdef COMPILER_VERSION_PATCH '.', COMPILER_VERSION_PATCH, # ifdef COMPILER_VERSION_TWEAK '.', COMPILER_VERSION_TWEAK, # endif # endif # endif ']','\0'}; #endif /* Construct a string literal encoding the internal version number. */ #ifdef COMPILER_VERSION_INTERNAL char const info_version_internal[] = { 'I', 'N', 'F', 'O', ':', 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', 'i','n','t','e','r','n','a','l','[', COMPILER_VERSION_INTERNAL,']','\0'}; #endif /* Construct a string literal encoding the version number components. */ #ifdef SIMULATE_VERSION_MAJOR char const info_simulate_version[] = { 'I', 'N', 'F', 'O', ':', 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', SIMULATE_VERSION_MAJOR, # ifdef SIMULATE_VERSION_MINOR '.', SIMULATE_VERSION_MINOR, # ifdef SIMULATE_VERSION_PATCH '.', SIMULATE_VERSION_PATCH, # ifdef SIMULATE_VERSION_TWEAK '.', SIMULATE_VERSION_TWEAK, # endif # endif # endif ']','\0'}; #endif /* Construct the string literal in pieces to prevent the source from getting matched. 
Store it in a pointer rather than an array because some compilers will just produce instructions to fill the array rather than assigning a pointer to a static array. */ char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; const char* info_language_dialect_default = "INFO" ":" "dialect_default[" #if __cplusplus > 201703L "20" #elif __cplusplus >= 201703L "17" #elif __cplusplus >= 201402L "14" #elif __cplusplus >= 201103L "11" #else "98" #endif "]"; /*--------------------------------------------------------------------------*/ int main(int argc, char* argv[]) { int require = 0; require += info_compiler[argc]; require += info_platform[argc]; #ifdef COMPILER_VERSION_MAJOR require += info_version[argc]; #endif #ifdef SIMULATE_ID require += info_simulate[argc]; #endif #ifdef SIMULATE_VERSION_MAJOR require += info_simulate_version[argc]; #endif require += info_language_dialect_default[argc]; (void)argv; return require; }
db7fd5655583a3af7a4570f8bba9d0115b92ad39.cu
#ifndef __CUDACC__ # error "A C or C++ compiler has been selected for CUDA" #endif /* Version number components: V=Version, R=Revision, P=Patch Version date components: YYYY=Year, MM=Month, DD=Day */ #if defined(__NVCC__) # define COMPILER_ID "NVIDIA" # if defined(_MSC_VER) # define SIMULATE_ID "MSVC" # endif # if defined(__CUDACC_VER_MAJOR__) # define COMPILER_VERSION_MAJOR DEC(__CUDACC_VER_MAJOR__) # define COMPILER_VERSION_MINOR DEC(__CUDACC_VER_MINOR__) # define COMPILER_VERSION_PATCH DEC(__CUDACC_VER_BUILD__) # endif # if defined(_MSC_VER) /* _MSC_VER = VVRR */ # define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) # define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) # endif /* These compilers are either not known or too old to define an identification macro. Try to identify the platform and guess that it is the native compiler. */ #elif defined(__sgi) # define COMPILER_ID "MIPSpro" #elif defined(__hpux) || defined(__hpua) # define COMPILER_ID "HP" #else /* unknown compiler */ # define COMPILER_ID "" #endif /* Construct the string literal in pieces to prevent the source from getting matched. Store it in a pointer rather than an array because some compilers will just produce instructions to fill the array rather than assigning a pointer to a static array. */ char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; #ifdef SIMULATE_ID char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; #endif #define STRINGIFY_HELPER(X) #X #define STRINGIFY(X) STRINGIFY_HELPER(X) /* Identify known platforms by name. 
*/ #if defined(__linux) || defined(__linux__) || defined(linux) # define PLATFORM_ID "Linux" #elif defined(__CYGWIN__) # define PLATFORM_ID "Cygwin" #elif defined(__MINGW32__) # define PLATFORM_ID "MinGW" #elif defined(__APPLE__) # define PLATFORM_ID "Darwin" #elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) # define PLATFORM_ID "Windows" #elif defined(__FreeBSD__) || defined(__FreeBSD) # define PLATFORM_ID "FreeBSD" #elif defined(__NetBSD__) || defined(__NetBSD) # define PLATFORM_ID "NetBSD" #elif defined(__OpenBSD__) || defined(__OPENBSD) # define PLATFORM_ID "OpenBSD" #elif defined(__sun) || defined(sun) # define PLATFORM_ID "SunOS" #elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) # define PLATFORM_ID "AIX" #elif defined(__sgi) || defined(__sgi__) || defined(_SGI) # define PLATFORM_ID "IRIX" #elif defined(__hpux) || defined(__hpux__) # define PLATFORM_ID "HP-UX" #elif defined(__HAIKU__) # define PLATFORM_ID "Haiku" #elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) # define PLATFORM_ID "BeOS" #elif defined(__QNX__) || defined(__QNXNTO__) # define PLATFORM_ID "QNX" #elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) # define PLATFORM_ID "Tru64" #elif defined(__riscos) || defined(__riscos__) # define PLATFORM_ID "RISCos" #elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) # define PLATFORM_ID "SINIX" #elif defined(__UNIX_SV__) # define PLATFORM_ID "UNIX_SV" #elif defined(__bsdos__) # define PLATFORM_ID "BSDOS" #elif defined(_MPRAS) || defined(MPRAS) # define PLATFORM_ID "MP-RAS" #elif defined(__osf) || defined(__osf__) # define PLATFORM_ID "OSF1" #elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) # define PLATFORM_ID "SCO_SV" #elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) # define PLATFORM_ID "ULTRIX" #elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) # define PLATFORM_ID "Xenix" #elif defined(__WATCOMC__) # if defined(__LINUX__) 
# define PLATFORM_ID "Linux" # elif defined(__DOS__) # define PLATFORM_ID "DOS" # elif defined(__OS2__) # define PLATFORM_ID "OS2" # elif defined(__WINDOWS__) # define PLATFORM_ID "Windows3x" # else /* unknown platform */ # define PLATFORM_ID # endif #else /* unknown platform */ # define PLATFORM_ID #endif /* For windows compilers MSVC and Intel we can determine the architecture of the compiler being used. This is because the compilers do not have flags that can change the architecture, but rather depend on which compiler is being used */ #if defined(_WIN32) && defined(_MSC_VER) # if defined(_M_IA64) # define ARCHITECTURE_ID "IA64" # elif defined(_M_X64) || defined(_M_AMD64) # define ARCHITECTURE_ID "x64" # elif defined(_M_IX86) # define ARCHITECTURE_ID "X86" # elif defined(_M_ARM64) # define ARCHITECTURE_ID "ARM64" # elif defined(_M_ARM) # if _M_ARM == 4 # define ARCHITECTURE_ID "ARMV4I" # elif _M_ARM == 5 # define ARCHITECTURE_ID "ARMV5I" # else # define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) # endif # elif defined(_M_MIPS) # define ARCHITECTURE_ID "MIPS" # elif defined(_M_SH) # define ARCHITECTURE_ID "SHx" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #elif defined(__WATCOMC__) # if defined(_M_I86) # define ARCHITECTURE_ID "I86" # elif defined(_M_IX86) # define ARCHITECTURE_ID "X86" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) # if defined(__ICCARM__) # define ARCHITECTURE_ID "ARM" # elif defined(__ICCAVR__) # define ARCHITECTURE_ID "AVR" # else /* unknown architecture */ # define ARCHITECTURE_ID "" # endif #else # define ARCHITECTURE_ID #endif /* Convert integer to decimal digit literals. 
*/ #define DEC(n) \ ('0' + (((n) / 10000000)%10)), \ ('0' + (((n) / 1000000)%10)), \ ('0' + (((n) / 100000)%10)), \ ('0' + (((n) / 10000)%10)), \ ('0' + (((n) / 1000)%10)), \ ('0' + (((n) / 100)%10)), \ ('0' + (((n) / 10)%10)), \ ('0' + ((n) % 10)) /* Convert integer to hex digit literals. */ #define HEX(n) \ ('0' + ((n)>>28 & 0xF)), \ ('0' + ((n)>>24 & 0xF)), \ ('0' + ((n)>>20 & 0xF)), \ ('0' + ((n)>>16 & 0xF)), \ ('0' + ((n)>>12 & 0xF)), \ ('0' + ((n)>>8 & 0xF)), \ ('0' + ((n)>>4 & 0xF)), \ ('0' + ((n) & 0xF)) /* Construct a string literal encoding the version number components. */ #ifdef COMPILER_VERSION_MAJOR char const info_version[] = { 'I', 'N', 'F', 'O', ':', 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', COMPILER_VERSION_MAJOR, # ifdef COMPILER_VERSION_MINOR '.', COMPILER_VERSION_MINOR, # ifdef COMPILER_VERSION_PATCH '.', COMPILER_VERSION_PATCH, # ifdef COMPILER_VERSION_TWEAK '.', COMPILER_VERSION_TWEAK, # endif # endif # endif ']','\0'}; #endif /* Construct a string literal encoding the internal version number. */ #ifdef COMPILER_VERSION_INTERNAL char const info_version_internal[] = { 'I', 'N', 'F', 'O', ':', 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', 'i','n','t','e','r','n','a','l','[', COMPILER_VERSION_INTERNAL,']','\0'}; #endif /* Construct a string literal encoding the version number components. */ #ifdef SIMULATE_VERSION_MAJOR char const info_simulate_version[] = { 'I', 'N', 'F', 'O', ':', 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', SIMULATE_VERSION_MAJOR, # ifdef SIMULATE_VERSION_MINOR '.', SIMULATE_VERSION_MINOR, # ifdef SIMULATE_VERSION_PATCH '.', SIMULATE_VERSION_PATCH, # ifdef SIMULATE_VERSION_TWEAK '.', SIMULATE_VERSION_TWEAK, # endif # endif # endif ']','\0'}; #endif /* Construct the string literal in pieces to prevent the source from getting matched. 
Store it in a pointer rather than an array because some compilers will just produce instructions to fill the array rather than assigning a pointer to a static array. */ char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; const char* info_language_dialect_default = "INFO" ":" "dialect_default[" #if __cplusplus > 201703L "20" #elif __cplusplus >= 201703L "17" #elif __cplusplus >= 201402L "14" #elif __cplusplus >= 201103L "11" #else "98" #endif "]"; /*--------------------------------------------------------------------------*/ int main(int argc, char* argv[]) { int require = 0; require += info_compiler[argc]; require += info_platform[argc]; #ifdef COMPILER_VERSION_MAJOR require += info_version[argc]; #endif #ifdef SIMULATE_ID require += info_simulate[argc]; #endif #ifdef SIMULATE_VERSION_MAJOR require += info_simulate_version[argc]; #endif require += info_language_dialect_default[argc]; (void)argv; return require; }
182fa52d525337cb65df532c1dd59c87fe42d6a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> //#define SIZE 1000 using namespace std; __global__ void Convolution1(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { int i=blockIdx.x; int j=blockIdx.y; if(i<size_result||j<size_result) { for(int k=0;k<size_filter;k++) for(int l=0;l<size_filter;l++) result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]; } } __global__ void Convolution3(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { int i=blockIdx.x; int j=blockIdx.y; int k=threadIdx.x; int l=threadIdx.y; if(i<size_result||j<size_result||k<size_filter||l<size_filter) { atomicAdd(&result[i*size_result+j],filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]); } } void Convolution2(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { for(int i=0;i<size_result;i++) { for(int j=0;j<size_result;j++) for(int k=0;k<size_filter;k++) for(int l=0;l<size_filter;l++) result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]; } } int main() { int *a,*filter,*result,*result_serial,*result_optimal; int size_a,size_filter,size_result; clock_t t; double time_taken; x: printf("\nEnter size of array:"); scanf("%d",&size_a); printf("\nEnter size of filter:"); scanf("%d",&size_filter); if(size_a%2==0||size_filter%2==0) { printf("\nEnter odd numbers for sizes."); goto x; } if((size_a-size_filter)<0) { printf("\nEnter larger matrix size or smaller filter size."); goto x; } size_result=(size_a-size_filter)/2 +1; printf("Size of Matrix after Convolution with stride = (2) will be: %d \n",size_result); hipMallocManaged(&a,size_a*size_a*sizeof(int)); hipMallocManaged(&filter,size_filter*size_filter*sizeof(int)); hipMallocManaged(&result,size_result*size_result*sizeof(int)); hipMallocManaged(&result_optimal,size_result*size_result*sizeof(int)); 
hipMallocManaged(&result_serial,size_result*size_result*sizeof(int)); srand(0); for(int i=0;i<size_a*size_a;i++) { a[i]=rand()%100; //printf("Enter a[%d]",i); //scanf("%d",&a[i]); } for(int i=0;i<size_filter*size_filter;i++) { filter[i]=rand()%100; //printf("Enter filter[%d]",i); //scanf("%d",&filter[i]); } for(int i=0;i<size_result*size_result;i++) { result[i]=0; result_serial[i]=0; result_optimal[i]=0; } dim3 res(size_result,size_result); dim3 fil(size_filter,size_filter); t=clock(); hipLaunchKernelGGL(( Convolution1), dim3(res),dim3(1), 0, 0, a,filter,result,size_a,size_filter,size_result); hipDeviceSynchronize(); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution with %d threads: %f \n",size_result*size_result,time_taken); t=clock(); hipLaunchKernelGGL(( Convolution3), dim3(res),dim3(fil), 0, 0, a,filter,result_optimal,size_a,size_filter,size_result); hipDeviceSynchronize(); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution with %d x %d threads: %f \n",size_result*size_result,size_filter*size_filter,time_taken); t=clock(); Convolution2(a,filter,result_serial,size_a,size_filter,size_result); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution using serial:%f \n",time_taken); printf("\nSanity Check:"); if(size_filter*size_filter>11) for(int i=0;i<10;i++) { printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]); } else for(int i=0;i<size_filter*size_filter;i++) { printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]); } hipFree(a); hipFree(filter); hipFree(result); hipFree(result_serial); return 0; } /***********************OUTPUT************************* [user10@linux-teslagpu ~]$ ./a.out Enter size of array:10001 Enter size of filter:3 Size of Matrix after Convolution with stride = (2) will be: 5000 Time for Convolution with 25000000 
threads: 0.000000 Time for Convolution using serial:1.990000 On Gtx 1050: E:\!KUNAL\MIT\BE\HPC\MiniProject>a.exe Enter size of array:10001 Enter size of filter:3 Size of Matrix after Convolution with stride = (2) will be: 5000 Time for Convolution with 25000000 threads: 2.210000 Time for Convolution with 25000000 x 9 threads: 0.134000 Time for Convolution using serial:3.210000 Sanity Check: result[0]=12792 result_serial[0]=12792 result_optimal[0]=12792 result[1]=14060 result_serial[1]=14060 result_optimal[1]=14060 result[2]=20138 result_serial[2]=20138 result_optimal[2]=20138 result[3]=19328 result_serial[3]=19328 result_optimal[3]=19328 result[4]=20288 result_serial[4]=20288 result_optimal[4]=20288 result[5]=14252 result_serial[5]=14252 result_optimal[5]=14252 result[6]=16804 result_serial[6]=16804 result_optimal[6]=16804 result[7]=20854 result_serial[7]=20854 result_optimal[7]=20854 result[8]=24886 result_serial[8]=24886 result_optimal[8]=24886 ******************************************************/
182fa52d525337cb65df532c1dd59c87fe42d6a7.cu
#include "cuda_runtime.h" #include <stdio.h> #include <time.h> //#define SIZE 1000 using namespace std; __global__ void Convolution1(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { int i=blockIdx.x; int j=blockIdx.y; if(i<size_result||j<size_result) { for(int k=0;k<size_filter;k++) for(int l=0;l<size_filter;l++) result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]; } } __global__ void Convolution3(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { int i=blockIdx.x; int j=blockIdx.y; int k=threadIdx.x; int l=threadIdx.y; if(i<size_result||j<size_result||k<size_filter||l<size_filter) { atomicAdd(&result[i*size_result+j],filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]); } } void Convolution2(int *a,int *filter,int *result,int size_a,int size_filter,int size_result) { for(int i=0;i<size_result;i++) { for(int j=0;j<size_result;j++) for(int k=0;k<size_filter;k++) for(int l=0;l<size_filter;l++) result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]; } } int main() { int *a,*filter,*result,*result_serial,*result_optimal; int size_a,size_filter,size_result; clock_t t; double time_taken; x: printf("\nEnter size of array:"); scanf("%d",&size_a); printf("\nEnter size of filter:"); scanf("%d",&size_filter); if(size_a%2==0||size_filter%2==0) { printf("\nEnter odd numbers for sizes."); goto x; } if((size_a-size_filter)<0) { printf("\nEnter larger matrix size or smaller filter size."); goto x; } size_result=(size_a-size_filter)/2 +1; printf("Size of Matrix after Convolution with stride = (2) will be: %d \n",size_result); cudaMallocManaged(&a,size_a*size_a*sizeof(int)); cudaMallocManaged(&filter,size_filter*size_filter*sizeof(int)); cudaMallocManaged(&result,size_result*size_result*sizeof(int)); cudaMallocManaged(&result_optimal,size_result*size_result*sizeof(int)); cudaMallocManaged(&result_serial,size_result*size_result*sizeof(int)); srand(0); for(int i=0;i<size_a*size_a;i++) { 
a[i]=rand()%100; //printf("Enter a[%d]",i); //scanf("%d",&a[i]); } for(int i=0;i<size_filter*size_filter;i++) { filter[i]=rand()%100; //printf("Enter filter[%d]",i); //scanf("%d",&filter[i]); } for(int i=0;i<size_result*size_result;i++) { result[i]=0; result_serial[i]=0; result_optimal[i]=0; } dim3 res(size_result,size_result); dim3 fil(size_filter,size_filter); t=clock(); Convolution1<<<res,1>>>(a,filter,result,size_a,size_filter,size_result); cudaDeviceSynchronize(); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution with %d threads: %f \n",size_result*size_result,time_taken); t=clock(); Convolution3<<<res,fil>>>(a,filter,result_optimal,size_a,size_filter,size_result); cudaDeviceSynchronize(); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution with %d x %d threads: %f \n",size_result*size_result,size_filter*size_filter,time_taken); t=clock(); Convolution2(a,filter,result_serial,size_a,size_filter,size_result); t=clock()-t; time_taken=((double)t)/CLOCKS_PER_SEC; printf("Time for Convolution using serial:%f \n",time_taken); printf("\nSanity Check:"); if(size_filter*size_filter>11) for(int i=0;i<10;i++) { printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]); } else for(int i=0;i<size_filter*size_filter;i++) { printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]); } cudaFree(a); cudaFree(filter); cudaFree(result); cudaFree(result_serial); return 0; } /***********************OUTPUT************************* [user10@linux-teslagpu ~]$ ./a.out Enter size of array:10001 Enter size of filter:3 Size of Matrix after Convolution with stride = (2) will be: 5000 Time for Convolution with 25000000 threads: 0.000000 Time for Convolution using serial:1.990000 On Gtx 1050: E:\!KUNAL\MIT\BE\HPC\MiniProject>a.exe Enter size of array:10001 Enter size of filter:3 Size of Matrix after 
Convolution with stride = (2) will be: 5000 Time for Convolution with 25000000 threads: 2.210000 Time for Convolution with 25000000 x 9 threads: 0.134000 Time for Convolution using serial:3.210000 Sanity Check: result[0]=12792 result_serial[0]=12792 result_optimal[0]=12792 result[1]=14060 result_serial[1]=14060 result_optimal[1]=14060 result[2]=20138 result_serial[2]=20138 result_optimal[2]=20138 result[3]=19328 result_serial[3]=19328 result_optimal[3]=19328 result[4]=20288 result_serial[4]=20288 result_optimal[4]=20288 result[5]=14252 result_serial[5]=14252 result_optimal[5]=14252 result[6]=16804 result_serial[6]=16804 result_optimal[6]=16804 result[7]=20854 result_serial[7]=20854 result_optimal[7]=20854 result[8]=24886 result_serial[8]=24886 result_optimal[8]=24886 ******************************************************/
1d206d8b669dd1f02c57717c8588f4dd306eb280.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); CAFFE_DCHECK_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_DCHECK_EQ(label.ndim(), 1); CAFFE_DCHECK_EQ(label.dim32(0), N); Y->Reshape(vector<TIndex>(1, N)); hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); CAFFE_DCHECK_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_DCHECK_EQ(label.ndim(), 1); CAFFE_DCHECK_EQ(label.dim32(0), N); CAFFE_DCHECK_EQ(dY.ndim(), 1); CAFFE_DCHECK_EQ(dY.dim32(0), N); dX->ReshapeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), dY.data<float>(), 
kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_CHECK_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Reshape(shape); int N = X.size(); hipLaunchKernelGGL(( MakeTwoClassKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_CHECK_GE(shape.size(), 1); CAFFE_CHECK_EQ(shape.back(), 2); shape.pop_back(); CAFFE_CHECK_LT(dY.size(), std::numeric_limits<int>::max()); dX->Reshape(shape); int N = dX->size(); hipLaunchKernelGGL(( MakeTwoClassGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, dY.data<float>(), dX->mutable_data<float>()); return true; } namespace { REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
1d206d8b669dd1f02c57717c8588f4dd306eb280.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); CAFFE_DCHECK_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_DCHECK_EQ(label.ndim(), 1); CAFFE_DCHECK_EQ(label.dim32(0), N); Y->Reshape(vector<TIndex>(1, N)); LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); CAFFE_DCHECK_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_DCHECK_EQ(label.ndim(), 1); CAFFE_DCHECK_EQ(label.dim32(0), N); CAFFE_DCHECK_EQ(dY.ndim(), 1); CAFFE_DCHECK_EQ(dY.dim32(0), N); dX->ReshapeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); LabelCrossEntropyGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), dY.data<float>(), kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, 
float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_CHECK_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Reshape(shape); int N = X.size(); MakeTwoClassKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_CHECK_GE(shape.size(), 1); CAFFE_CHECK_EQ(shape.back(), 2); shape.pop_back(); CAFFE_CHECK_LT(dY.size(), std::numeric_limits<int>::max()); dX->Reshape(shape); int N = dX->size(); MakeTwoClassGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, dY.data<float>(), dX->mutable_data<float>()); return true; } namespace { REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
f1005cba9eff15b3eae725f24086a5b7d00402b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdexcept> #include <utility> #include <iostream> #include <hiprand/hiprand_kernel.h> #include "cupy_distributions.cuh" struct rk_state { __device__ virtual uint32_t rk_int() { return 0; } __device__ virtual double rk_double() { return 0.0; } __device__ virtual double rk_normal() { return 0.0; } __device__ virtual float rk_normal_float() { return 0.0; } }; template<typename CURAND_TYPE> struct curand_pseudo_state: rk_state { // Valid for XORWOW and MRG32k3a CURAND_TYPE* _state; int _id; __device__ curand_pseudo_state(int id, intptr_t state) { _state = reinterpret_cast<CURAND_TYPE*>(state) + id; _id = id; } __device__ virtual uint32_t rk_int() { return hiprand(_state); } __device__ virtual double rk_double() { // Curand returns (0, 1] while the functions // below rely on [0, 1) double r = hiprand_uniform(_state); if (r >= 1.0) { r = 0.0; } return r; } __device__ virtual double rk_normal() { return hiprand_normal_double(_state); } __device__ virtual float rk_normal_float() { return hiprand_normal(_state); } }; // This design is the same as the dtypes one template <typename F, typename... Ts> void generator_dispatcher(int generator_id, F f, Ts&&... 
args) { switch(generator_id) { case CURAND_XOR_WOW: return f.template operator()<curand_pseudo_state<hiprandState_t>>(std::forward<Ts>(args)...); case CURAND_MRG32k3a: return f.template operator()<curand_pseudo_state<curandStateMRG32k3a>>(std::forward<Ts>(args)...); case CURAND_PHILOX_4x32_10: return f.template operator()<curand_pseudo_state<hiprandStatePhilox4_32_10_t>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unknown random generator"); } } template<typename T> __global__ void init_curand(intptr_t state, uint64_t seed, ssize_t size) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ T curand_state(id, state); if (id < size) { hiprand_init(seed, id, 0, curand_state._state); } } struct initialize_launcher { initialize_launcher(ssize_t size, hipStream_t stream) : _size(size), _stream(stream) { } template<typename T, typename... Args> void operator()(Args&&... args) { int tpb = 256; int bpg = (_size + tpb - 1) / tpb; hipLaunchKernelGGL(( init_curand<T>), dim3(bpg), dim3(tpb), 0, _stream, std::forward<Args>(args)...); } ssize_t _size; hipStream_t _stream; }; void init_curand_generator(int generator, intptr_t state_ptr, uint64_t seed, ssize_t size, intptr_t stream) { // state_ptr is a device ptr initialize_launcher launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state_ptr, seed, size); } __device__ double rk_standard_exponential(rk_state* state) { /* We use -log(1-U) since U is [0, 1) */ return -log(1.0 - state->rk_double()); } __device__ double rk_standard_normal(rk_state* state) { return state->rk_normal(); } __device__ float rk_standard_normal_float(rk_state* state) { return state->rk_normal_float(); } __device__ double rk_standard_gamma(rk_state* state, double shape) { double b, c; double U, V, X, Y; if (shape == 1.0) { return rk_standard_exponential(state); } else if (shape < 1.0) { for (;;) { U = state->rk_double(); V = 
rk_standard_exponential(state); if (U <= 1.0 - shape) { X = pow(U, 1./shape); if (X <= V) { return X; } } else { Y = -log((1-U)/shape); X = pow(1.0 - shape + shape*Y, 1./shape); if (X <= (V + Y)) { return X; } } } } else { b = shape - 1./3.; c = 1./sqrt(9*b); for (;;) { do { X = state->rk_normal(); V = 1.0 + c*X; } while (V <= 0.0); V = V*V*V; U = state->rk_double(); if (U < 1.0 - 0.0331*(X*X)*(X*X)) return (b*V); if (log(U) < 0.5*X*X + b*(1. - V + log(V))) return (b*V); } } } __device__ double rk_beta(rk_state* state, double a, double b) { double Ga, Gb; if ((a <= 1.0) && (b <= 1.0)) { double U, V, X, Y; /* Use Johnk's algorithm */ while (1) { U = state->rk_double(); V = state->rk_double(); X = pow(U, 1.0/a); Y = pow(V, 1.0/b); if ((X + Y) <= 1.0) { if (X +Y > 0) { return X / (X + Y); } else { double logX = log(U) / a; double logY = log(V) / b; double logM = logX > logY ? logX : logY; logX -= logM; logY -= logM; return exp(logX - log(exp(logX) + exp(logY))); } } } } else { Ga = rk_standard_gamma(state, a); Gb = rk_standard_gamma(state, b); return Ga/(Ga + Gb); } } __device__ uint32_t rk_raw(rk_state* state) { return state->rk_int(); } __device__ uint32_t rk_interval_32(rk_state* state, uint32_t mx, uint32_t mask) { uint32_t sampled = state->rk_int() & mask; while(sampled > mx) { sampled = state->rk_int() & mask; } return sampled; } __device__ uint64_t rk_interval_64(rk_state* state, uint64_t mx, uint64_t mask) { uint32_t hi= state->rk_int(); uint32_t lo= state->rk_int(); uint64_t sampled = (static_cast<uint64_t>(hi) << 32 | lo) & mask; while(sampled > mx) { hi= state->rk_int(); lo= state->rk_int(); sampled = (static_cast<uint64_t>(hi) << 32 | lo) & mask; } return sampled; } struct raw_functor { template<typename... Args> __device__ uint32_t operator () (Args&&... args) { return rk_raw(args...); } }; struct interval_32_functor { template<typename... Args> __device__ uint32_t operator () (Args&&... 
args) { return rk_interval_32(args...); } }; struct interval_64_functor { template<typename... Args> __device__ uint64_t operator () (Args&&... args) { return rk_interval_64(args...); } }; struct beta_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_beta(args...); } }; // There are several errors when trying to do this a full template struct exponential_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_standard_exponential(args...); } }; struct standard_normal_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_standard_normal(args...); } }; struct standard_normal_float_functor { template<typename... Args> __device__ float operator () (Args&&... args) { return rk_standard_normal_float(args...); } }; template<typename F, typename T, typename R, typename... Args> __global__ void execute_dist(intptr_t state, intptr_t out, ssize_t size, Args... args) { int id = threadIdx.x + blockIdx.x * blockDim.x; R* out_ptr = reinterpret_cast<R*>(out); if (id < size) { T random(id, state); F func; out_ptr[id] = func(&random, std::forward<Args>(args)...); } return; } template <typename F, typename R> struct kernel_launcher { kernel_launcher(ssize_t size, hipStream_t stream) : _size(size), _stream(stream) { } template<typename T, typename... Args> void operator()(Args&&... 
args) { int tpb = 256; int bpg = (_size + tpb - 1) / tpb; hipLaunchKernelGGL(( execute_dist<F, T, R>), dim3(bpg), dim3(tpb), 0, _stream, std::forward<Args>(args)...); } ssize_t _size; hipStream_t _stream; }; //These functions will take the generator_id as a parameter void raw(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<raw_functor, int32_t> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); } //These functions will take the generator_id as a parameter void interval_32(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, int32_t mx, int32_t mask) { kernel_launcher<interval_32_functor, int32_t> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, static_cast<uint32_t>(mx), static_cast<uint32_t>(mask)); } void interval_64(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, int64_t mx, int64_t mask) { kernel_launcher<interval_64_functor, int64_t> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, static_cast<uint64_t>(mx), static_cast<uint64_t>(mask)); } void beta(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, double a, double b) { kernel_launcher<beta_functor, double> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, a, b); } void exponential(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<exponential_functor, double> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); } void standard_normal(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<standard_normal_functor, double> launcher(size, reinterpret_cast<hipStream_t>(stream)); 
generator_dispatcher(generator, launcher, state, out, size); } void standard_normal_float(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<standard_normal_float_functor, float> launcher(size, reinterpret_cast<hipStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); }
f1005cba9eff15b3eae725f24086a5b7d00402b8.cu
#include <stdio.h> #include <stdexcept> #include <utility> #include <iostream> #include <curand_kernel.h> #include "cupy_distributions.cuh" struct rk_state { __device__ virtual uint32_t rk_int() { return 0; } __device__ virtual double rk_double() { return 0.0; } __device__ virtual double rk_normal() { return 0.0; } __device__ virtual float rk_normal_float() { return 0.0; } }; template<typename CURAND_TYPE> struct curand_pseudo_state: rk_state { // Valid for XORWOW and MRG32k3a CURAND_TYPE* _state; int _id; __device__ curand_pseudo_state(int id, intptr_t state) { _state = reinterpret_cast<CURAND_TYPE*>(state) + id; _id = id; } __device__ virtual uint32_t rk_int() { return curand(_state); } __device__ virtual double rk_double() { // Curand returns (0, 1] while the functions // below rely on [0, 1) double r = curand_uniform(_state); if (r >= 1.0) { r = 0.0; } return r; } __device__ virtual double rk_normal() { return curand_normal_double(_state); } __device__ virtual float rk_normal_float() { return curand_normal(_state); } }; // This design is the same as the dtypes one template <typename F, typename... Ts> void generator_dispatcher(int generator_id, F f, Ts&&... 
args) { switch(generator_id) { case CURAND_XOR_WOW: return f.template operator()<curand_pseudo_state<curandState>>(std::forward<Ts>(args)...); case CURAND_MRG32k3a: return f.template operator()<curand_pseudo_state<curandStateMRG32k3a>>(std::forward<Ts>(args)...); case CURAND_PHILOX_4x32_10: return f.template operator()<curand_pseudo_state<curandStatePhilox4_32_10_t>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unknown random generator"); } } template<typename T> __global__ void init_curand(intptr_t state, uint64_t seed, ssize_t size) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ T curand_state(id, state); if (id < size) { curand_init(seed, id, 0, curand_state._state); } } struct initialize_launcher { initialize_launcher(ssize_t size, cudaStream_t stream) : _size(size), _stream(stream) { } template<typename T, typename... Args> void operator()(Args&&... args) { int tpb = 256; int bpg = (_size + tpb - 1) / tpb; init_curand<T><<<bpg, tpb, 0, _stream>>>(std::forward<Args>(args)...); } ssize_t _size; cudaStream_t _stream; }; void init_curand_generator(int generator, intptr_t state_ptr, uint64_t seed, ssize_t size, intptr_t stream) { // state_ptr is a device ptr initialize_launcher launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state_ptr, seed, size); } __device__ double rk_standard_exponential(rk_state* state) { /* We use -log(1-U) since U is [0, 1) */ return -log(1.0 - state->rk_double()); } __device__ double rk_standard_normal(rk_state* state) { return state->rk_normal(); } __device__ float rk_standard_normal_float(rk_state* state) { return state->rk_normal_float(); } __device__ double rk_standard_gamma(rk_state* state, double shape) { double b, c; double U, V, X, Y; if (shape == 1.0) { return rk_standard_exponential(state); } else if (shape < 1.0) { for (;;) { U = state->rk_double(); V = rk_standard_exponential(state); if 
(U <= 1.0 - shape) { X = pow(U, 1./shape); if (X <= V) { return X; } } else { Y = -log((1-U)/shape); X = pow(1.0 - shape + shape*Y, 1./shape); if (X <= (V + Y)) { return X; } } } } else { b = shape - 1./3.; c = 1./sqrt(9*b); for (;;) { do { X = state->rk_normal(); V = 1.0 + c*X; } while (V <= 0.0); V = V*V*V; U = state->rk_double(); if (U < 1.0 - 0.0331*(X*X)*(X*X)) return (b*V); if (log(U) < 0.5*X*X + b*(1. - V + log(V))) return (b*V); } } } __device__ double rk_beta(rk_state* state, double a, double b) { double Ga, Gb; if ((a <= 1.0) && (b <= 1.0)) { double U, V, X, Y; /* Use Johnk's algorithm */ while (1) { U = state->rk_double(); V = state->rk_double(); X = pow(U, 1.0/a); Y = pow(V, 1.0/b); if ((X + Y) <= 1.0) { if (X +Y > 0) { return X / (X + Y); } else { double logX = log(U) / a; double logY = log(V) / b; double logM = logX > logY ? logX : logY; logX -= logM; logY -= logM; return exp(logX - log(exp(logX) + exp(logY))); } } } } else { Ga = rk_standard_gamma(state, a); Gb = rk_standard_gamma(state, b); return Ga/(Ga + Gb); } } __device__ uint32_t rk_raw(rk_state* state) { return state->rk_int(); } __device__ uint32_t rk_interval_32(rk_state* state, uint32_t mx, uint32_t mask) { uint32_t sampled = state->rk_int() & mask; while(sampled > mx) { sampled = state->rk_int() & mask; } return sampled; } __device__ uint64_t rk_interval_64(rk_state* state, uint64_t mx, uint64_t mask) { uint32_t hi= state->rk_int(); uint32_t lo= state->rk_int(); uint64_t sampled = (static_cast<uint64_t>(hi) << 32 | lo) & mask; while(sampled > mx) { hi= state->rk_int(); lo= state->rk_int(); sampled = (static_cast<uint64_t>(hi) << 32 | lo) & mask; } return sampled; } struct raw_functor { template<typename... Args> __device__ uint32_t operator () (Args&&... args) { return rk_raw(args...); } }; struct interval_32_functor { template<typename... Args> __device__ uint32_t operator () (Args&&... args) { return rk_interval_32(args...); } }; struct interval_64_functor { template<typename... 
Args> __device__ uint64_t operator () (Args&&... args) { return rk_interval_64(args...); } }; struct beta_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_beta(args...); } }; // There are several errors when trying to do this a full template struct exponential_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_standard_exponential(args...); } }; struct standard_normal_functor { template<typename... Args> __device__ double operator () (Args&&... args) { return rk_standard_normal(args...); } }; struct standard_normal_float_functor { template<typename... Args> __device__ float operator () (Args&&... args) { return rk_standard_normal_float(args...); } }; template<typename F, typename T, typename R, typename... Args> __global__ void execute_dist(intptr_t state, intptr_t out, ssize_t size, Args... args) { int id = threadIdx.x + blockIdx.x * blockDim.x; R* out_ptr = reinterpret_cast<R*>(out); if (id < size) { T random(id, state); F func; out_ptr[id] = func(&random, std::forward<Args>(args)...); } return; } template <typename F, typename R> struct kernel_launcher { kernel_launcher(ssize_t size, cudaStream_t stream) : _size(size), _stream(stream) { } template<typename T, typename... Args> void operator()(Args&&... 
args) { int tpb = 256; int bpg = (_size + tpb - 1) / tpb; execute_dist<F, T, R><<<bpg, tpb, 0, _stream>>>(std::forward<Args>(args)...); } ssize_t _size; cudaStream_t _stream; }; //These functions will take the generator_id as a parameter void raw(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<raw_functor, int32_t> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); } //These functions will take the generator_id as a parameter void interval_32(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, int32_t mx, int32_t mask) { kernel_launcher<interval_32_functor, int32_t> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, static_cast<uint32_t>(mx), static_cast<uint32_t>(mask)); } void interval_64(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, int64_t mx, int64_t mask) { kernel_launcher<interval_64_functor, int64_t> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, static_cast<uint64_t>(mx), static_cast<uint64_t>(mask)); } void beta(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream, double a, double b) { kernel_launcher<beta_functor, double> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size, a, b); } void exponential(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<exponential_functor, double> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); } void standard_normal(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<standard_normal_functor, double> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, 
launcher, state, out, size); } void standard_normal_float(int generator, intptr_t state, intptr_t out, ssize_t size, intptr_t stream) { kernel_launcher<standard_normal_float_functor, float> launcher(size, reinterpret_cast<cudaStream_t>(stream)); generator_dispatcher(generator, launcher, state, out, size); }
6cec241d3cb5d7e73d51efa8f6374346bd43a09f.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; /* NOTE: CUTLASS tests are disabled for now. 
The compile times are too long at * the moment */ template <typename T> class MatMulTest : public ::testing::Test { protected: void SetUp() override { CheckTestTensorCoreTypeSupport<T>(); pb = std::make_unique<detail::MatXPybind>(); // Half precision needs a bit more // tolerance when compared to fp32 if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) { thresh = 0.5f; } } void TearDown() { pb.reset(); } std::unique_ptr<detail::MatXPybind> pb; float thresh = 0.01f; }; template <typename TensorType> class MatMulTestFloatTypes : public MatMulTest<TensorType> { }; template <typename TensorType> class MatMulTestFloatNonHalfTypes : public MatMulTest<TensorType> { }; template <typename TensorType> class MatMulTestFloatNonComplexTypes : public MatMulTest<TensorType> { }; TYPED_TEST_SUITE(MatMulTestFloatTypes, MatXFloatTypes); TYPED_TEST_SUITE(MatMulTestFloatNonHalfTypes, MatXFloatNonHalfTypes); TYPED_TEST_SUITE(MatMulTestFloatNonComplexTypes, MatXFloatNonComplexTypes); template <typename T> struct float_to_complex { using type = cuda::std::complex<T>; }; template <> struct float_to_complex<matxFp16> { using type = matxFp16Complex; }; template <> struct float_to_complex<matxBf16> { using type = matxBf16Complex; }; template <typename T> using float_to_complex_t = typename float_to_complex<T>::type; TYPED_TEST(MatMulTestFloatTypes, SmallRect) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-1 // Perform the GEMM C = A*B (c = matmul(a, b)).run(); // example-end matmul-test-1 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectATranspose) { 
MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{k, m}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_a_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-2 // Perform the GEMM C = A^T * B auto at = a.PermuteMatrix(); (c = matmul(at, b)).run(); // example-end matmul-test-2 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectBTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{n, k}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_b_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-3 // Perform the GEMM C = A * B^T auto bt = b.PermuteMatrix(); (c = matmul(a, bt)).run(); // example-end matmul-test-3 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, SmallRectCTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{n, m}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto ct = transpose_matrix(c); (ct = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, ct, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectUserPointer) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; 
constexpr index_t k = 8; constexpr index_t n = 16; TypeParam *ap, *bp, *cp; hipMallocManaged(&ap, m*k*sizeof(TypeParam)); hipMallocManaged(&bp, k*n*sizeof(TypeParam)); hipMallocManaged(&cp, m*n*sizeof(TypeParam)); auto a = make_tensor<TypeParam, 2>(ap, {m, k},false); auto b = make_tensor<TypeParam, 2>(bp, {k, n},false); auto c = make_tensor<TypeParam, 2>(cp, {m, n},false); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); hipFree(ap); hipFree(bp); hipFree(cp); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, DISABLED_SmallRectTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; auto at = a.Permute({1,0}); auto bt = b.Permute({1,0}); auto ct = c.Permute({1,0}); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (ct = matmul(bt, at)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, ct, "c", 0.01); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallSquare) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 4; constexpr index_t n = 4; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // matmul<TypeParam, TypeParam, TypeParam, 2, PROVIDER_TYPE_CUTLASS>(c, a, // b); // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", 
this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRect) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // matmul<TypeParam, TypeParam, TypeParam, 2, PROVIDER_TYPE_CUTLASS>(c, a, // b); // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched) { MATX_ENTER_HANDLER(); // example-begin matmul-test-4 constexpr index_t batches = 5; constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {batches, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // Perform a batched gemm with "batches" GEMMs (c = matmul(a, b)).run(); // example-end matmul-test-4 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched0StrideA) { MATX_ENTER_HANDLER(); constexpr index_t batches = 2; constexpr index_t m = 3; constexpr index_t k = 4; constexpr index_t n = 5; tensor_t<TypeParam, 2> a0{{m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 2> b0{{k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; tensor_t<TypeParam, 2> c0{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a0, "a"); 
this->pb->NumpyToTensorView(b0, "b"); this->pb->NumpyToTensorView(c0, "c"); (b = b0).run(); // Perform a batched gemm with "batches" GEMMs (c = matmul(a0, b)).run(); hipStreamSynchronize(0); for (int i = 0; i < c.Size(0); i++) { for (int j = 0; j < c.Size(1); j++) { for (int p = 0; p < c.Size(2); p++) { EXPECT_TRUE(MatXUtils::MatXTypeCompare(c0(j, p), c(i, j, p), this->thresh)); } } } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched0StrideB) { MATX_ENTER_HANDLER(); constexpr index_t batches = 2; constexpr index_t m = 3; constexpr index_t k = 4; constexpr index_t n = 5; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 2> a0{{m, k}}; tensor_t<TypeParam, 2> b0{{k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; tensor_t<TypeParam, 2> c0{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a0, "a"); this->pb->NumpyToTensorView(b0, "b"); this->pb->NumpyToTensorView(c0, "c"); (a = a0).run(); // Perform a batched gemm with "batches" GEMMs (c = matmul(a, b0)).run(); hipStreamSynchronize(0); for (int i = 0; i < c.Size(0); i++) { for (int j = 0; j < c.Size(1); j++) { for (int p = 0; p < c.Size(2); p++) { EXPECT_TRUE(MatXUtils::MatXTypeCompare(c0(j, p), c(i, j, p), this->thresh)); } } } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched3DStridedBatch) { MATX_ENTER_HANDLER(); // example-begin matmul-test-5 constexpr index_t batches = 16; constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; auto as = a.Slice({0, 0, 0}, {matxEnd, matxEnd, matxEnd}, {2, 1, 1}); auto bs = b.Slice({0, 0, 0}, {matxEnd, matxEnd, matxEnd}, {2, 1, 1}); tensor_t<TypeParam, 3> cs{{batches/2, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", 
{batches, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // Perform a strided and batched GEMM where "as" and "bs" have a stride of 2 in their inner-most dimension (cs = matmul(as, bs)).run(); // example-end matmul-test-5 MATX_TEST_ASSERT_COMPARE(this->pb, cs, "cs", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonComplexTypes, MixedTypes) { // a -> complex, b -> real, c -> complex MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; using ComplexTypeParam = float_to_complex_t<TypeParam>; tensor_t<ComplexTypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<ComplexTypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_mixed", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched4D) { MATX_ENTER_HANDLER(); // constexpr index_t batches = 5; // constexpr index_t m = 128; // constexpr index_t k = 256; // constexpr index_t n = 512; auto a = make_tensor<TypeParam>({5, 5, 128, 256}); auto b = make_tensor<TypeParam>({5, 5, 256, 512}); auto c = make_tensor<TypeParam>({5, 5, 128, 512}); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {5, 5, 128, 256, 512}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, MatMulAxis) { MATX_ENTER_HANDLER(); constexpr index_t m = 16; constexpr index_t k = 32; constexpr index_t n = 64; constexpr index_t b = 8; tensor_t<TypeParam, 3> a3{{b, m, k}}; tensor_t<TypeParam, 3> b3{{b, k, n}}; tensor_t<TypeParam, 3> c3{{b, m, n}}; this->pb->template 
InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {b, m, k, n}); this->pb->NumpyToTensorView(a3, "a"); this->pb->NumpyToTensorView(b3, "b"); { // identity permute const int axis[2] = {1, 2}; std::array<int, 3> perm({0, 1, 2}); auto ai = make_tensor<TypeParam>({b, m, k}); auto bi = make_tensor<TypeParam>({b, k, n}); auto ci = make_tensor<TypeParam>({b, m, n}); auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); (c3 = cp).run(); hipStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // transposing inner dims // example-begin matmul-test-6 const int axis[2] = {2, 1}; std::array<int, 3> perm({0, 2, 1}); auto ai = make_tensor<TypeParam>({b, k, m}); auto bi = make_tensor<TypeParam>({b, n, k}); auto ci = make_tensor<TypeParam>({b, n, m}); auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); // Perform a GEMM with the last two dimensions permuted (ci = matmul(ai, bi, axis)).run(); // example-end matmul-test-6 // copy result from permuted output (c3 = cp).run(); hipStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // first and last const int axis[2] = {0 ,2}; std::array<int, 3> perm({1, 0, 2}); tensor_t<TypeParam, 3> ai{{m, b, k}}; tensor_t<TypeParam, 3> bi{{k, b, n}}; tensor_t<TypeParam, 3> ci{{m, b, n}}; auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); // copy result from permuted output (c3 = cp).run(); hipStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // affine not supported const int axis[2] = {0, 1}; std::array<int, 3> perm({2, 0, 1}); tensor_t<TypeParam, 3> ai{{m, k, b}}; tensor_t<TypeParam, 3> 
bi{{k, n, b}}; tensor_t<TypeParam, 3> ci{{m, n, b}}; auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); // copy result from permuted output (c3 = cp).run(); hipStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, MatMulOp) { MATX_ENTER_HANDLER(); constexpr index_t m = 16; constexpr index_t k = 32; constexpr index_t n = 64; constexpr index_t b = 8; tensor_t<TypeParam, 3> a3{{b, m, k}}; tensor_t<TypeParam, 3> b3{{b, k, n}}; tensor_t<TypeParam, 3> c3{{b, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {b, m, k, n}); this->pb->NumpyToTensorView(a3, "a"); this->pb->NumpyToTensorView(b3, "b"); { // simple identity remaps auto rb = range<0>({b},0, 1); auto ar = remap<0>(a3, rb); auto br = remap<0>(b3, rb); auto cr = remap<0>(c3, rb); (cr = matmul(ar, br)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumMatVec) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 1; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<1>(c, {0,0}, {matxEnd, matxDropDim}); auto bs = slice<1>(b, {0,0}, {matxEnd, matxDropDim}); // example-begin matvec-test-1 // "a" is a matrix and "bs" is a vector matvec(cs, a, bs); // example-end matvec-test-1 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // Test also with rank-1 tensors rather than just slices tensor_t<TypeParam, 1> bv{{k}}; tensor_t<TypeParam, 1> cv{{m}}; (bv = bs).run(); 
(cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumMatVecBatch) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 1; constexpr index_t blocks = 8; tensor_t<TypeParam, 3> a{{blocks, m, k}}; tensor_t<TypeParam, 3> b{{blocks, k, n}}; tensor_t<TypeParam, 3> c{{blocks, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {blocks, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<2>(c, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); auto bs = slice<2>(b, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); matvec<decltype(cs), decltype(a), decltype(bs), PROVIDER_TYPE_CUBLASLT>(cs, a, bs); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); tensor_t<TypeParam, 2> bv{{blocks, k}}; tensor_t<TypeParam, 2> cv{{blocks, m}}; (bv = bs).run(); (cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MatVecRowVector) { MATX_ENTER_HANDLER(); // Test that the second-to-last dimension of A can be 1 (i.e. A can be a row // vector). In the case of matvec, this means that A*b is effectively a dot product. 
constexpr index_t m = 1; constexpr index_t k = 256; constexpr index_t n = 1; constexpr index_t blocks = 8; tensor_t<TypeParam, 3> a{{blocks, m, k}}; tensor_t<TypeParam, 3> b{{blocks, k, n}}; tensor_t<TypeParam, 3> c{{blocks, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {blocks, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<2>(c, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); auto bs = slice<2>(b, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); matvec<decltype(cs), decltype(a), decltype(bs), PROVIDER_TYPE_CUBLASLT>(cs, a, bs); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); tensor_t<TypeParam, 2> bv{{blocks, k}}; tensor_t<TypeParam, 2> cv{{blocks, m}}; (bv = bs).run(); (cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); }
6cec241d3cb5d7e73d51efa8f6374346bd43a09f.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; /* NOTE: CUTLASS tests are disabled for now. 
The compile times are too long at * the moment */ template <typename T> class MatMulTest : public ::testing::Test { protected: void SetUp() override { CheckTestTensorCoreTypeSupport<T>(); pb = std::make_unique<detail::MatXPybind>(); // Half precision needs a bit more // tolerance when compared to fp32 if constexpr (is_complex_half_v<T> || is_matx_half_v<T>) { thresh = 0.5f; } } void TearDown() { pb.reset(); } std::unique_ptr<detail::MatXPybind> pb; float thresh = 0.01f; }; template <typename TensorType> class MatMulTestFloatTypes : public MatMulTest<TensorType> { }; template <typename TensorType> class MatMulTestFloatNonHalfTypes : public MatMulTest<TensorType> { }; template <typename TensorType> class MatMulTestFloatNonComplexTypes : public MatMulTest<TensorType> { }; TYPED_TEST_SUITE(MatMulTestFloatTypes, MatXFloatTypes); TYPED_TEST_SUITE(MatMulTestFloatNonHalfTypes, MatXFloatNonHalfTypes); TYPED_TEST_SUITE(MatMulTestFloatNonComplexTypes, MatXFloatNonComplexTypes); template <typename T> struct float_to_complex { using type = cuda::std::complex<T>; }; template <> struct float_to_complex<matxFp16> { using type = matxFp16Complex; }; template <> struct float_to_complex<matxBf16> { using type = matxBf16Complex; }; template <typename T> using float_to_complex_t = typename float_to_complex<T>::type; TYPED_TEST(MatMulTestFloatTypes, SmallRect) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-1 // Perform the GEMM C = A*B (c = matmul(a, b)).run(); // example-end matmul-test-1 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectATranspose) { 
MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{k, m}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_a_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-2 // Perform the GEMM C = A^T * B auto at = a.PermuteMatrix(); (c = matmul(at, b)).run(); // example-end matmul-test-2 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectBTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{n, k}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_b_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // example-begin matmul-test-3 // Perform the GEMM C = A * B^T auto bt = b.PermuteMatrix(); (c = matmul(a, bt)).run(); // example-end matmul-test-3 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, SmallRectCTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{n, m}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto ct = transpose_matrix(c); (ct = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, ct, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallRectUserPointer) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; 
constexpr index_t k = 8; constexpr index_t n = 16; TypeParam *ap, *bp, *cp; cudaMallocManaged(&ap, m*k*sizeof(TypeParam)); cudaMallocManaged(&bp, k*n*sizeof(TypeParam)); cudaMallocManaged(&cp, m*n*sizeof(TypeParam)); auto a = make_tensor<TypeParam, 2>(ap, {m, k},false); auto b = make_tensor<TypeParam, 2>(bp, {k, n},false); auto c = make_tensor<TypeParam, 2>(cp, {m, n},false); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); cudaFree(ap); cudaFree(bp); cudaFree(cp); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, DISABLED_SmallRectTranspose) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; auto at = a.Permute({1,0}); auto bt = b.Permute({1,0}); auto ct = c.Permute({1,0}); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_transpose", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (ct = matmul(bt, at)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, ct, "c", 0.01); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, SmallSquare) { MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 4; constexpr index_t n = 4; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // matmul<TypeParam, TypeParam, TypeParam, 2, PROVIDER_TYPE_CUTLASS>(c, a, // b); // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", 
this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRect) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // matmul<TypeParam, TypeParam, TypeParam, 2, PROVIDER_TYPE_CUTLASS>(c, a, // b); // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched) { MATX_ENTER_HANDLER(); // example-begin matmul-test-4 constexpr index_t batches = 5; constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {batches, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // Perform a batched gemm with "batches" GEMMs (c = matmul(a, b)).run(); // example-end matmul-test-4 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched0StrideA) { MATX_ENTER_HANDLER(); constexpr index_t batches = 2; constexpr index_t m = 3; constexpr index_t k = 4; constexpr index_t n = 5; tensor_t<TypeParam, 2> a0{{m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 2> b0{{k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; tensor_t<TypeParam, 2> c0{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a0, "a"); 
this->pb->NumpyToTensorView(b0, "b"); this->pb->NumpyToTensorView(c0, "c"); (b = b0).run(); // Perform a batched gemm with "batches" GEMMs (c = matmul(a0, b)).run(); cudaStreamSynchronize(0); for (int i = 0; i < c.Size(0); i++) { for (int j = 0; j < c.Size(1); j++) { for (int p = 0; p < c.Size(2); p++) { EXPECT_TRUE(MatXUtils::MatXTypeCompare(c0(j, p), c(i, j, p), this->thresh)); } } } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched0StrideB) { MATX_ENTER_HANDLER(); constexpr index_t batches = 2; constexpr index_t m = 3; constexpr index_t k = 4; constexpr index_t n = 5; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 2> a0{{m, k}}; tensor_t<TypeParam, 2> b0{{k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; tensor_t<TypeParam, 2> c0{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a0, "a"); this->pb->NumpyToTensorView(b0, "b"); this->pb->NumpyToTensorView(c0, "c"); (a = a0).run(); // Perform a batched gemm with "batches" GEMMs (c = matmul(a, b0)).run(); cudaStreamSynchronize(0); for (int i = 0; i < c.Size(0); i++) { for (int j = 0; j < c.Size(1); j++) { for (int p = 0; p < c.Size(2); p++) { EXPECT_TRUE(MatXUtils::MatXTypeCompare(c0(j, p), c(i, j, p), this->thresh)); } } } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched3DStridedBatch) { MATX_ENTER_HANDLER(); // example-begin matmul-test-5 constexpr index_t batches = 16; constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 512; tensor_t<TypeParam, 3> a{{batches, m, k}}; tensor_t<TypeParam, 3> b{{batches, k, n}}; tensor_t<TypeParam, 3> c{{batches, m, n}}; auto as = a.Slice({0, 0, 0}, {matxEnd, matxEnd, matxEnd}, {2, 1, 1}); auto bs = b.Slice({0, 0, 0}, {matxEnd, matxEnd, matxEnd}, {2, 1, 1}); tensor_t<TypeParam, 3> cs{{batches/2, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", 
{batches, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); // Perform a strided and batched GEMM where "as" and "bs" have a stride of 2 in their inner-most dimension (cs = matmul(as, bs)).run(); // example-end matmul-test-5 MATX_TEST_ASSERT_COMPARE(this->pb, cs, "cs", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonComplexTypes, MixedTypes) { // a -> complex, b -> real, c -> complex MATX_ENTER_HANDLER(); constexpr index_t m = 4; constexpr index_t k = 8; constexpr index_t n = 16; using ComplexTypeParam = float_to_complex_t<TypeParam>; tensor_t<ComplexTypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<ComplexTypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run_mixed", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumRectBatched4D) { MATX_ENTER_HANDLER(); // constexpr index_t batches = 5; // constexpr index_t m = 128; // constexpr index_t k = 256; // constexpr index_t n = 512; auto a = make_tensor<TypeParam>({5, 5, 128, 256}); auto b = make_tensor<TypeParam>({5, 5, 256, 512}); auto c = make_tensor<TypeParam>({5, 5, 128, 512}); this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {5, 5, 128, 256, 512}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); (c = matmul(a, b)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, MatMulAxis) { MATX_ENTER_HANDLER(); constexpr index_t m = 16; constexpr index_t k = 32; constexpr index_t n = 64; constexpr index_t b = 8; tensor_t<TypeParam, 3> a3{{b, m, k}}; tensor_t<TypeParam, 3> b3{{b, k, n}}; tensor_t<TypeParam, 3> c3{{b, m, n}}; this->pb->template 
InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {b, m, k, n}); this->pb->NumpyToTensorView(a3, "a"); this->pb->NumpyToTensorView(b3, "b"); { // identity permute const int axis[2] = {1, 2}; std::array<int, 3> perm({0, 1, 2}); auto ai = make_tensor<TypeParam>({b, m, k}); auto bi = make_tensor<TypeParam>({b, k, n}); auto ci = make_tensor<TypeParam>({b, m, n}); auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); (c3 = cp).run(); cudaStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // transposing inner dims // example-begin matmul-test-6 const int axis[2] = {2, 1}; std::array<int, 3> perm({0, 2, 1}); auto ai = make_tensor<TypeParam>({b, k, m}); auto bi = make_tensor<TypeParam>({b, n, k}); auto ci = make_tensor<TypeParam>({b, n, m}); auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); // Perform a GEMM with the last two dimensions permuted (ci = matmul(ai, bi, axis)).run(); // example-end matmul-test-6 // copy result from permuted output (c3 = cp).run(); cudaStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // first and last const int axis[2] = {0 ,2}; std::array<int, 3> perm({1, 0, 2}); tensor_t<TypeParam, 3> ai{{m, b, k}}; tensor_t<TypeParam, 3> bi{{k, b, n}}; tensor_t<TypeParam, 3> ci{{m, b, n}}; auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); // copy result from permuted output (c3 = cp).run(); cudaStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } { // affine not supported const int axis[2] = {0, 1}; std::array<int, 3> perm({2, 0, 1}); tensor_t<TypeParam, 3> ai{{m, k, b}}; tensor_t<TypeParam, 
3> bi{{k, n, b}}; tensor_t<TypeParam, 3> ci{{m, n, b}}; auto ap = permute(ai, perm); auto bp = permute(bi, perm); auto cp = permute(ci, perm); // copy data into permuted inputs (ap = a3).run(); (bp = b3).run(); (ci = matmul(ai, bi, axis)).run(); // copy result from permuted output (c3 = cp).run(); cudaStreamSynchronize(0); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatNonHalfTypes, MatMulOp) { MATX_ENTER_HANDLER(); constexpr index_t m = 16; constexpr index_t k = 32; constexpr index_t n = 64; constexpr index_t b = 8; tensor_t<TypeParam, 3> a3{{b, m, k}}; tensor_t<TypeParam, 3> b3{{b, k, n}}; tensor_t<TypeParam, 3> c3{{b, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {b, m, k, n}); this->pb->NumpyToTensorView(a3, "a"); this->pb->NumpyToTensorView(b3, "b"); { // simple identity remaps auto rb = range<0>({b},0, 1); auto ar = remap<0>(a3, rb); auto br = remap<0>(b3, rb); auto cr = remap<0>(c3, rb); (cr = matmul(ar, br)).run(); MATX_TEST_ASSERT_COMPARE(this->pb, c3, "c", this->thresh); } MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumMatVec) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 1; tensor_t<TypeParam, 2> a{{m, k}}; tensor_t<TypeParam, 2> b{{k, n}}; tensor_t<TypeParam, 2> c{{m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<1>(c, {0,0}, {matxEnd, matxDropDim}); auto bs = slice<1>(b, {0,0}, {matxEnd, matxDropDim}); // example-begin matvec-test-1 // "a" is a matrix and "bs" is a vector matvec(cs, a, bs); // example-end matvec-test-1 MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); // Test also with rank-1 tensors rather than just slices tensor_t<TypeParam, 1> bv{{k}}; tensor_t<TypeParam, 1> cv{{m}}; (bv = 
bs).run(); (cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MediumMatVecBatch) { MATX_ENTER_HANDLER(); constexpr index_t m = 128; constexpr index_t k = 256; constexpr index_t n = 1; constexpr index_t blocks = 8; tensor_t<TypeParam, 3> a{{blocks, m, k}}; tensor_t<TypeParam, 3> b{{blocks, k, n}}; tensor_t<TypeParam, 3> c{{blocks, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {blocks, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<2>(c, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); auto bs = slice<2>(b, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); matvec<decltype(cs), decltype(a), decltype(bs), PROVIDER_TYPE_CUBLASLT>(cs, a, bs); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); tensor_t<TypeParam, 2> bv{{blocks, k}}; tensor_t<TypeParam, 2> cv{{blocks, m}}; (bv = bs).run(); (cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); } TYPED_TEST(MatMulTestFloatTypes, MatVecRowVector) { MATX_ENTER_HANDLER(); // Test that the second-to-last dimension of A can be 1 (i.e. A can be a row // vector). In the case of matvec, this means that A*b is effectively a dot product. 
constexpr index_t m = 1; constexpr index_t k = 256; constexpr index_t n = 1; constexpr index_t blocks = 8; tensor_t<TypeParam, 3> a{{blocks, m, k}}; tensor_t<TypeParam, 3> b{{blocks, k, n}}; tensor_t<TypeParam, 3> c{{blocks, m, n}}; this->pb->template InitAndRunTVGenerator<TypeParam>( "00_transforms", "matmul_operators", "run", {blocks, m, k, n}); this->pb->NumpyToTensorView(a, "a"); this->pb->NumpyToTensorView(b, "b"); auto cs = slice<2>(c, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); auto bs = slice<2>(b, {0,0,0}, {matxEnd, matxEnd, matxDropDim}); matvec<decltype(cs), decltype(a), decltype(bs), PROVIDER_TYPE_CUBLASLT>(cs, a, bs); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); tensor_t<TypeParam, 2> bv{{blocks, k}}; tensor_t<TypeParam, 2> cv{{blocks, m}}; (bv = bs).run(); (cv = cs).run(); matvec<decltype(cv), decltype(a), decltype(bv), PROVIDER_TYPE_CUBLASLT>(cv, a, bv); MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); MATX_EXIT_HANDLER(); }
144e034c2a0f3db24df3f53466f737c022698402.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void k_dummy_test() { }
144e034c2a0f3db24df3f53466f737c022698402.cu
#include "includes.h" __global__ void k_dummy_test() { }
b8586d22f8ceb7283b2d3bf0563ae1f58f9dfbcc.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "wb.h" #include <bits/stdc++.h> //#include <cstdio> //#include <cstdlib> #define NUM_BINS 4096 #define BIN_CAP 127 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define THREADS 8 __global__ void histogram(unsigned int* output, unsigned int* input, int inputLength) { __shared__ unsigned int value[NUM_BINS]; __shared__ int done[NUM_BINS]; unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; if(x < inputLength){ unsigned int idx = input[x]; value[idx] = 0; done[idx] = 0; __syncthreads(); atomicAdd(value + idx, (unsigned int)1); __syncthreads(); atomicMin(value + idx, BIN_CAP); __syncthreads(); if(!atomicAdd(done + idx, 1)){ atomicAdd(output + idx, value[idx]); atomicMin(output + idx, BIN_CAP); } } } int main(int argc, char *argv[]) { int inputLength; unsigned int *hostInput; unsigned int *hostBins; unsigned int *deviceInput; unsigned int *deviceBins; /* Read input arguments here */ wbArg_t args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength); hostBins = (unsigned int *)calloc(NUM_BINS, sizeof(unsigned int)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); wbLog(TRACE, "The number of bins is ", NUM_BINS); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void **)&deviceInput, inputLength * sizeof(unsigned int)); hipMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int)); hipMemset(deviceBins, 0, NUM_BINS); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(GPU, "Allocating 
GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), hipMemcpyHostToDevice); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- wbLog(TRACE, "Launching kernel"); wbTime_start(Compute, "Performing CUDA computation"); //@@ Perform kernel computation here dim3 block(THREADS, 1, 1); dim3 grid(ceil(inputLength / block.x), 1, 1); histogram << <grid, block >> > (deviceBins, deviceInput, inputLength); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), hipMemcpyDeviceToHost); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceInput); hipFree(deviceBins); wbTime_stop(GPU, "Freeing GPU Memory"); // Verify correctness // ----------------------------------------------------- wbSolution(args, hostBins, NUM_BINS); free(hostBins); free(hostInput); return 0; }
b8586d22f8ceb7283b2d3bf0563ae1f58f9dfbcc.cu
#include <cuda.h> #include "cuda_runtime.h" #include "wb.h" #include <bits/stdc++.h> //#include <cstdio> //#include <cstdlib> #define NUM_BINS 4096 #define BIN_CAP 127 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define THREADS 8 __global__ void histogram(unsigned int* output, unsigned int* input, int inputLength) { __shared__ unsigned int value[NUM_BINS]; __shared__ int done[NUM_BINS]; unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; if(x < inputLength){ unsigned int idx = input[x]; value[idx] = 0; done[idx] = 0; __syncthreads(); atomicAdd(value + idx, (unsigned int)1); __syncthreads(); atomicMin(value + idx, BIN_CAP); __syncthreads(); if(!atomicAdd(done + idx, 1)){ atomicAdd(output + idx, value[idx]); atomicMin(output + idx, BIN_CAP); } } } int main(int argc, char *argv[]) { int inputLength; unsigned int *hostInput; unsigned int *hostBins; unsigned int *deviceInput; unsigned int *deviceBins; /* Read input arguments here */ wbArg_t args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength); hostBins = (unsigned int *)calloc(NUM_BINS, sizeof(unsigned int)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); wbLog(TRACE, "The number of bins is ", NUM_BINS); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void **)&deviceInput, inputLength * sizeof(unsigned int)); cudaMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int)); cudaMemset(deviceBins, 0, NUM_BINS); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the 
GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), cudaMemcpyHostToDevice); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- wbLog(TRACE, "Launching kernel"); wbTime_start(Compute, "Performing CUDA computation"); //@@ Perform kernel computation here dim3 block(THREADS, 1, 1); dim3 grid(ceil(inputLength / block.x), 1, 1); histogram << <grid, block >> > (deviceBins, deviceInput, inputLength); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), cudaMemcpyDeviceToHost); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceInput); cudaFree(deviceBins); wbTime_stop(GPU, "Freeing GPU Memory"); // Verify correctness // ----------------------------------------------------- wbSolution(args, hostBins, NUM_BINS); free(hostBins); free(hostInput); return 0; }
e3817b119996783236c7f086fd2dce3298d326d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define BLOCK_SIZE 3 #define WA 3 #define HA 3 #define WB 3 #define HB 3 #define WC 3 #define HC 3 void Init(float * data ,int size) { for(int i = 0; i < size; ++i) data[i] = i; } __global__ void matrixMul(float* A,float* B,float* C,int wA,int wB) { int tx = threadIdx.x; int ty = threadIdx.y; float value = 0; for(int i = 0; i < wA; ++i) { float elementA = A[ty * wA + i]; float elementB = B[i * wB + tx]; value += elementA * elementB; } // write to device mem C[ty * wA + tx] = value; } int main(int argc ,char** argv) { srand(2006); unsigned int size_A = WA * HA; unsigned int mem_size_A =sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B =sizeof(float) * size_B; float * h_B = (float*) malloc(mem_size_B); unsigned int size_C = WC * HC; unsigned int mem_size_C =sizeof(float) * size_C; float * h_C = (float *) malloc(mem_size_C); Init(h_A, size_A); Init(h_B, size_B); printf("\n\nMatrix A\n"); for(int i = 0; i < size_A; i++) { printf("%f ", h_A[i]); if(((i + 1) % WA) == 0) printf("\n"); } printf("\n\nMatrix B\n"); for(int i = 0; i < size_B; i++) { printf ("%f ", h_B[i]); if(((i + 1) % WB) == 0) printf("\n"); } float* d_A; float* d_B; float* d_C; hipMalloc((void**) &d_A, mem_size_A); hipMalloc((void**) &d_B, mem_size_B); hipMalloc((void**) &d_C, mem_size_C); hipMemcpy(d_A, h_A,mem_size_A ,hipMemcpyHostToDevice); hipMemcpy(d_B, h_B,mem_size_B ,hipMemcpyHostToDevice); dim3 threads(BLOCK_SIZE , BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); hipLaunchKernelGGL(( matrixMul), dim3(grid) , dim3(threads) , 0, 0, d_A,d_B, d_C, WA, WB); hipMemcpy(h_C, d_C, mem_size_C ,hipMemcpyDeviceToHost); printf("\n\nMatrix C (Results) \n"); for(int i = 0;i<size_C; i ++){ printf("%f ",h_C[i]); if(((i+ 1) % WC) == 0) printf("\n"); } printf("\n"); hipFree(d_A); hipFree(d_B); 
hipFree(d_C); free(h_A); free(h_B); free(h_C); }
e3817b119996783236c7f086fd2dce3298d326d8.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #define BLOCK_SIZE 3 #define WA 3 #define HA 3 #define WB 3 #define HB 3 #define WC 3 #define HC 3 void Init(float * data ,int size) { for(int i = 0; i < size; ++i) data[i] = i; } __global__ void matrixMul(float* A,float* B,float* C,int wA,int wB) { int tx = threadIdx.x; int ty = threadIdx.y; float value = 0; for(int i = 0; i < wA; ++i) { float elementA = A[ty * wA + i]; float elementB = B[i * wB + tx]; value += elementA * elementB; } // write to device mem C[ty * wA + tx] = value; } int main(int argc ,char** argv) { srand(2006); unsigned int size_A = WA * HA; unsigned int mem_size_A =sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B =sizeof(float) * size_B; float * h_B = (float*) malloc(mem_size_B); unsigned int size_C = WC * HC; unsigned int mem_size_C =sizeof(float) * size_C; float * h_C = (float *) malloc(mem_size_C); Init(h_A, size_A); Init(h_B, size_B); printf("\n\nMatrix A\n"); for(int i = 0; i < size_A; i++) { printf("%f ", h_A[i]); if(((i + 1) % WA) == 0) printf("\n"); } printf("\n\nMatrix B\n"); for(int i = 0; i < size_B; i++) { printf ("%f ", h_B[i]); if(((i + 1) % WB) == 0) printf("\n"); } float* d_A; float* d_B; float* d_C; cudaMalloc((void**) &d_A, mem_size_A); cudaMalloc((void**) &d_B, mem_size_B); cudaMalloc((void**) &d_C, mem_size_C); cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice); dim3 threads(BLOCK_SIZE , BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); matrixMul<<< grid , threads >>>(d_A,d_B, d_C, WA, WB); cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost); printf("\n\nMatrix C (Results) \n"); for(int i = 0;i<size_C; i ++){ printf("%f ",h_C[i]); if(((i+ 1) % WC) == 0) printf("\n"); } printf("\n"); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); }
f70635ddffa54fc7c03634e648b20fb169986034.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixClip.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; hipMalloc(&a, XSIZE*YSIZE); double min = 1; double max = 1; double *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int cr = 1; int cc = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixClip), dim3(gridBlock),dim3(threadBlock), 0, 0, a,min,max,c,cr,cc); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixClip), dim3(gridBlock),dim3(threadBlock), 0, 0, a,min,max,c,cr,cc); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixClip), dim3(gridBlock),dim3(threadBlock), 0, 0, a,min,max,c,cr,cc); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f70635ddffa54fc7c03634e648b20fb169986034.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixClip.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); double min = 1; double max = 1; double *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int cr = 1; int cc = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixClip<<<gridBlock,threadBlock>>>(a,min,max,c,cr,cc); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixClip<<<gridBlock,threadBlock>>>(a,min,max,c,cr,cc); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixClip<<<gridBlock,threadBlock>>>(a,min,max,c,cr,cc); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
cad9d1b515159dc4f9a4610af8c2ec214dfbad93.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include "helper_timer.h" #define cudaCheckError(err) __cudaCheckError(err, __FILE__, __LINE__ ) inline void __cudaCheckError(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s at (%s:%i)\n", hipGetErrorString(err), file, line); exit(-1); } } __global__ void add (int *a, int *b, int *c, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < N) { c[tid] = a[tid]+b[tid]; } } void add_cpu(int *a, int *b, int *c, int N, unsigned Blocks, unsigned Threads, FILE * pFile, float time) { struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); for (int i = 0; i < N; i++) { c[i] = a[i] + b[i]; } gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); fprintf(pFile, "%u %u %u %f %ld.%06ld\n", Blocks, Threads, N, time, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); } void check (int* cpu_c, int* gpu_c, int N) { int flag = 0; for (int i = 0; i < N; i++) { if(gpu_c[i] != cpu_c[i]) { printf("Not Equal!\n"); flag = 1; break; } } if(!flag) { printf("Equal!\n"); } } int main(int argc, char* argv[]) { FILE * gFile = fopen("results", "a"); if(argc != 4) { printf("Usage: %s [liczba-blockw] [wtki-na-block] [rozmiar-tablicy]\n", argv[0]); exit(-1); } unsigned Blocks = atoi(argv[1]); unsigned Threads = atoi(argv[2]); unsigned N = atoi(argv[3]); int* a = (int*) malloc(N * sizeof(int)); int* b = (int*) malloc(N * sizeof(int)); int* c = (int*) malloc(N * sizeof(int)); int* cpu_a = (int*) malloc(N * sizeof(int)); int* cpu_b = (int*) malloc(N * sizeof(int)); int* cpu_c = (int*) malloc(N * sizeof(int)); int *dev_a, *dev_b, *dev_c;//, *a_d; hipMalloc((void**)&dev_a, N * sizeof(int)); hipMalloc((void**)&dev_b, N * sizeof(int)); hipMalloc((void**)&dev_c, N * sizeof(int)); for (int i=0; i<N; i++) { a[i] = i; b[i] = i*2; 
cpu_a[i] = i; cpu_b[i] = i*2; } cudaCheckError(hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice)); cudaCheckError(hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice)); cudaCheckError(hipMemcpy(dev_c, c, N*sizeof(int), hipMemcpyHostToDevice)); StopWatchInterface *timer=NULL; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); hipLaunchKernelGGL(( add) , dim3(Blocks),dim3(Threads), 0, 0, dev_a,dev_b,dev_c, N); cudaCheckError(hipPeekAtLastError()); cudaCheckError(hipDeviceSynchronize()); sdkStopTimer(&timer); float time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); cudaCheckError(hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost)); /*for (int i = 0; i < N; i++) { printf("%d+%d=%d\n", a[i], b[i], c[i]); }*/ hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); add_cpu(cpu_a, cpu_b, cpu_c, N, Blocks, Threads, gFile, time/1000); //check(cpu_c, c, N); /*for (int i = 0; i < N; i++) { printf("%d+%d=%d\n", cpu_a[i], cpu_b[i], cpu_c[i]); }*/ fclose (gFile); free(a); free(b); free(c); free(cpu_a); free(cpu_b); free(cpu_c); return 0; }
cad9d1b515159dc4f9a4610af8c2ec214dfbad93.cu
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include "helper_timer.h" #define cudaCheckError(err) __cudaCheckError(err, __FILE__, __LINE__ ) inline void __cudaCheckError(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s at (%s:%i)\n", cudaGetErrorString(err), file, line); exit(-1); } } __global__ void add (int *a, int *b, int *c, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < N) { c[tid] = a[tid]+b[tid]; } } void add_cpu(int *a, int *b, int *c, int N, unsigned Blocks, unsigned Threads, FILE * pFile, float time) { struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); for (int i = 0; i < N; i++) { c[i] = a[i] + b[i]; } gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); fprintf(pFile, "%u %u %u %f %ld.%06ld\n", Blocks, Threads, N, time, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); } void check (int* cpu_c, int* gpu_c, int N) { int flag = 0; for (int i = 0; i < N; i++) { if(gpu_c[i] != cpu_c[i]) { printf("Not Equal!\n"); flag = 1; break; } } if(!flag) { printf("Equal!\n"); } } int main(int argc, char* argv[]) { FILE * gFile = fopen("results", "a"); if(argc != 4) { printf("Usage: %s [liczba-blocków] [wątki-na-block] [rozmiar-tablicy]\n", argv[0]); exit(-1); } unsigned Blocks = atoi(argv[1]); unsigned Threads = atoi(argv[2]); unsigned N = atoi(argv[3]); int* a = (int*) malloc(N * sizeof(int)); int* b = (int*) malloc(N * sizeof(int)); int* c = (int*) malloc(N * sizeof(int)); int* cpu_a = (int*) malloc(N * sizeof(int)); int* cpu_b = (int*) malloc(N * sizeof(int)); int* cpu_c = (int*) malloc(N * sizeof(int)); int *dev_a, *dev_b, *dev_c;//, *a_d; cudaMalloc((void**)&dev_a, N * sizeof(int)); cudaMalloc((void**)&dev_b, N * sizeof(int)); cudaMalloc((void**)&dev_c, N * sizeof(int)); for (int i=0; i<N; i++) { a[i] = i; b[i] = i*2; cpu_a[i] = i; cpu_b[i] = i*2; } cudaCheckError(cudaMemcpy(dev_a, a, 
N*sizeof(int), cudaMemcpyHostToDevice)); cudaCheckError(cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice)); cudaCheckError(cudaMemcpy(dev_c, c, N*sizeof(int), cudaMemcpyHostToDevice)); StopWatchInterface *timer=NULL; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); add <<<Blocks,Threads>>> (dev_a,dev_b,dev_c, N); cudaCheckError(cudaPeekAtLastError()); cudaCheckError(cudaThreadSynchronize()); sdkStopTimer(&timer); float time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); cudaCheckError(cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost)); /*for (int i = 0; i < N; i++) { printf("%d+%d=%d\n", a[i], b[i], c[i]); }*/ cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); add_cpu(cpu_a, cpu_b, cpu_c, N, Blocks, Threads, gFile, time/1000); //check(cpu_c, c, N); /*for (int i = 0; i < N; i++) { printf("%d+%d=%d\n", cpu_a[i], cpu_b[i], cpu_c[i]); }*/ fclose (gFile); free(a); free(b); free(c); free(cpu_a); free(cpu_b); free(cpu_c); return 0; }
e1aeb462e0559737e02ee9238734a2258d576eb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stackUtil.cuh" #include "stripedModel.cuh" #include "TetrahedronSystemInterface.h" #include <CudaBase.h> template<class T> struct SharedMemory { __device__ inline operator T*() { extern __shared__ int __smem[]; return (T*)__smem; } }; __device__ int isElementExcluded(uint b, uint a, uint * exclusionInd, uint * exclusionStart) { if(a >= b) return 1; uint cur = exclusionStart[a+1]-1; uint minInd = exclusionStart[a]; for(; cur >= minInd; cur--) { if(b <= exclusionInd[cur]) return 1; } return 0; } __device__ int isTetrahedronConnected(uint a, uint b, const uint4 * v) { if(a == b) return 1; const uint4 ta = v[a]; const uint4 tb = v[b]; if(ta.x == tb.x || ta.x == tb.y || ta.x == tb.z || ta.x == tb.w) return 1; if(ta.y == tb.x || ta.y == tb.y || ta.y == tb.z || ta.y == tb.w) return 1; if(ta.z == tb.x || ta.z == tb.y || ta.z == tb.z || ta.z == tb.w) return 1; if(ta.w == tb.x || ta.w == tb.y || ta.w == tb.z || ta.w == tb.w) return 1; return 0; } __global__ void writePairCacheSelfCollideExclusion_kernel(uint2 * dst, uint * cacheWriteLocation, uint * cacheStarts, uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, unsigned queryIdx, uint * exclusionIndices, uint * exclusionStarts) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; uint cacheSize = overlappingCounts[boxIndex]; if(cacheSize < 1) return; uint startLoc = cacheStarts[boxIndex]; uint writeLoc = cacheWriteLocation[boxIndex]; if((writeLoc - startLoc) >= cacheSize) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = 
isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? (int)mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; uint2 pair; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isElementExcluded(bvhRigidIndex, boxIndex, exclusionIndices, exclusionStarts)) { pair.x = combineObjectElementInd(queryIdx, boxIndex); pair.y = combineObjectElementInd(queryIdx, bvhRigidIndex); dst[writeLoc] = pair; writeLoc++; } if((writeLoc - startLoc)==cacheSize) { // cache if full break; } } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } cacheWriteLocation[boxIndex] = writeLoc; } __global__ void computePairCountsSelfCollideExclusion_kernel(uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint * exclusionIndices, uint * exclusionStarts) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isElementExcluded(bvhRigidIndex, boxIndex, exclusionIndices, exclusionStarts)) overlappingCounts[boxIndex] += 1; } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void resetPairCounts_kernel(uint * dst, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind] = 0; } __global__ void resetPairCache_kernel(uint2 * dst, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind].x = 0x80000000; dst[ind].y = 0x80000000; } __global__ void computePairCountsSelfCollide_kernel(uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isTetrahedronConnected(bvhRigidIndex, boxIndex, tetrahedronIndices)) overlappingCounts[boxIndex] += 1; } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void writePairCacheSelfCollide_kernel(uint2 * dst, uint * cacheStarts, uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices, unsigned queryIdx) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; //uint cacheSize = overlappingCounts[boxIndex]; //if(cacheSize < 1) return; uint startLoc = cacheStarts[boxIndex]; uint writeLoc = startLoc; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; uint2 pair; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isTetrahedronConnected(bvhRigidIndex, boxIndex, tetrahedronIndices)) { pair.x = combineObjectElementInd(queryIdx, boxIndex); pair.y = combineObjectElementInd(queryIdx, bvhRigidIndex); // ascentOrder<uint2>(pair); dst[writeLoc] = pair; writeLoc++; } // } //if((writeLoc - startLoc)==cacheSize) { // cache if full // return; //} } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void uniquePair_kernel(uint * dst, uint2 * pairs, uint pairLength, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; if(ind >= pairLength) { dst[ind] = 0; return; } // assume it is unique dst[ind] = 1; uint a = pairs[ind].x; uint b = pairs[ind].y; unsigned cur = ind; // check forward for(;;) { if(cur < 1) return; cur--; if(pairs[cur].x != a) return; if(pairs[cur].y == b) { dst[ind] = 0; return; } } } __global__ void compactUniquePairs_kernel(uint2 * dst, uint2 * pairs, uint * unique, uint * dstLoc, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; if(unique[ind] > 0) { dst[dstLoc[ind]] = pairs[ind]; } } extern "C" { void broadphaseResetPairCounts(uint * dst, uint num) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(num, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( resetPairCounts_kernel), dim3(grid), dim3(block) , 0, 0, dst, num); } void broadphaseResetPairCache(uint2 * dst, uint num) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(num, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( resetPairCache_kernel), dim3(grid), dim3(block) , 0, 0, dst, num); } void broadphaseComputePairCountsSelfCollide(uint * dst, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, 
Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices) { int tpb = CudaBase::LimitNThreadPerBlock(20, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( computePairCountsSelfCollide_kernel), dim3(grid), dim3(block) , 0, 0, dst, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, tetrahedronIndices); } void broadphaseWritePairCacheSelfCollide(uint2 * dst, uint * starts, uint * counts, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices, unsigned queryIdx) { int tpb = CudaBase::LimitNThreadPerBlock(22, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( writePairCacheSelfCollide_kernel), dim3(grid), dim3(block) , 0, 0, dst, starts, counts, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, tetrahedronIndices, queryIdx); } void broadphaseUniquePair(uint * dst, uint2 * pairs, uint pairLength, uint bufLength) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(bufLength, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( uniquePair_kernel), dim3(grid), dim3(block) , 0, 0, dst, pairs, pairLength, bufLength); } void broadphaseCompactUniquePairs(uint2 * dst, uint2 * pairs, uint * unique, uint * loc, uint pairLength) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(pairLength, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( compactUniquePairs_kernel), dim3(grid), dim3(block) , 0, 0, dst, pairs, unique, loc, pairLength); } void broadphaseComputePairCountsSelfCollideExclusion(uint * dst, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, 
uint * exclusionIndices, uint * exclusionStarts) { int tpb = CudaBase::LimitNThreadPerBlock(18, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( computePairCountsSelfCollideExclusion_kernel), dim3(grid), dim3(block) , 0, 0, dst, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, exclusionIndices, exclusionStarts); } void cuBroadphase_writePairCacheSelfCollideExclusion(uint2 * dst, uint * locations, uint * starts, uint * counts, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, unsigned queryIdx, uint * exclusionIndices, uint * exclusionStarts) { int tpb = CudaBase::LimitNThreadPerBlock(20, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( writePairCacheSelfCollideExclusion_kernel), dim3(grid), dim3(block) , 0, 0, dst, locations, starts, counts, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, queryIdx, exclusionIndices, exclusionStarts); } }
e1aeb462e0559737e02ee9238734a2258d576eb1.cu
#include "stackUtil.cuh" #include "stripedModel.cuh" #include "TetrahedronSystemInterface.h" #include <CudaBase.h> template<class T> struct SharedMemory { __device__ inline operator T*() { extern __shared__ int __smem[]; return (T*)__smem; } }; __device__ int isElementExcluded(uint b, uint a, uint * exclusionInd, uint * exclusionStart) { if(a >= b) return 1; uint cur = exclusionStart[a+1]-1; uint minInd = exclusionStart[a]; for(; cur >= minInd; cur--) { if(b <= exclusionInd[cur]) return 1; } return 0; } __device__ int isTetrahedronConnected(uint a, uint b, const uint4 * v) { if(a == b) return 1; const uint4 ta = v[a]; const uint4 tb = v[b]; if(ta.x == tb.x || ta.x == tb.y || ta.x == tb.z || ta.x == tb.w) return 1; if(ta.y == tb.x || ta.y == tb.y || ta.y == tb.z || ta.y == tb.w) return 1; if(ta.z == tb.x || ta.z == tb.y || ta.z == tb.z || ta.z == tb.w) return 1; if(ta.w == tb.x || ta.w == tb.y || ta.w == tb.z || ta.w == tb.w) return 1; return 0; } __global__ void writePairCacheSelfCollideExclusion_kernel(uint2 * dst, uint * cacheWriteLocation, uint * cacheStarts, uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, unsigned queryIdx, uint * exclusionIndices, uint * exclusionStarts) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; uint cacheSize = overlappingCounts[boxIndex]; if(cacheSize < 1) return; uint startLoc = cacheStarts[boxIndex]; uint writeLoc = cacheWriteLocation[boxIndex]; if((writeLoc - startLoc) >= cacheSize) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = 
getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? (int)mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; uint2 pair; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isElementExcluded(bvhRigidIndex, boxIndex, exclusionIndices, exclusionStarts)) { pair.x = combineObjectElementInd(queryIdx, boxIndex); pair.y = combineObjectElementInd(queryIdx, bvhRigidIndex); dst[writeLoc] = pair; writeLoc++; } if((writeLoc - startLoc)==cacheSize) { // cache if full break; } } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } cacheWriteLocation[boxIndex] = writeLoc; } __global__ void computePairCountsSelfCollideExclusion_kernel(uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint * exclusionIndices, uint * exclusionStarts) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isElementExcluded(bvhRigidIndex, boxIndex, exclusionIndices, exclusionStarts)) overlappingCounts[boxIndex] += 1; } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void resetPairCounts_kernel(uint * dst, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind] = 0; } __global__ void resetPairCache_kernel(uint2 * dst, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; dst[ind].x = 0x80000000; dst[ind].y = 0x80000000; } __global__ void computePairCountsSelfCollide_kernel(uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isTetrahedronConnected(bvhRigidIndex, boxIndex, tetrahedronIndices)) overlappingCounts[boxIndex] += 1; } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void writePairCacheSelfCollide_kernel(uint2 * dst, uint * cacheStarts, uint * overlappingCounts, Aabb * boxes, uint maxBoxInd, int * rootNodeIndex, int2 * internalNodeChildIndices, Aabb * internalNodeAabbs, Aabb * leafAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices, unsigned queryIdx) { uint boxIndex = blockIdx.x*blockDim.x + threadIdx.x; if(boxIndex >= maxBoxInd) return; //uint cacheSize = overlappingCounts[boxIndex]; //if(cacheSize < 1) return; uint startLoc = cacheStarts[boxIndex]; uint writeLoc = startLoc; Aabb box = boxes[boxIndex]; uint stack[B3_BROADPHASE_MAX_STACK_SIZE]; int stackSize = 1; stack[0] = *rootNodeIndex; int isLeaf; for(;;) { if(outOfStack(stackSize)) break; uint internalOrLeafNodeIndex = stack[ stackSize - 1 ]; stackSize--; isLeaf = isLeafNode(internalOrLeafNodeIndex); //Internal node if false uint bvhNodeIndex = getIndexWithInternalNodeMarkerRemoved(internalOrLeafNodeIndex); //bvhRigidIndex is not used if internal node int bvhRigidIndex = (isLeaf) ? mortonCodesAndAabbIndices[bvhNodeIndex].value : -1; Aabb bvhNodeAabb = (isLeaf) ? 
leafAabbs[bvhRigidIndex] : internalNodeAabbs[bvhNodeIndex]; uint2 pair; if(isAabbOverlapping(box, bvhNodeAabb)) { if(isLeaf) { if(!isTetrahedronConnected(bvhRigidIndex, boxIndex, tetrahedronIndices)) { pair.x = combineObjectElementInd(queryIdx, boxIndex); pair.y = combineObjectElementInd(queryIdx, bvhRigidIndex); // ascentOrder<uint2>(pair); dst[writeLoc] = pair; writeLoc++; } // } //if((writeLoc - startLoc)==cacheSize) { // cache if full // return; //} } else { if(isStackFull(stackSize)) continue; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].x; stackSize++; stack[ stackSize ] = internalNodeChildIndices[bvhNodeIndex].y; stackSize++; } } } } __global__ void uniquePair_kernel(uint * dst, uint2 * pairs, uint pairLength, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; if(ind >= pairLength) { dst[ind] = 0; return; } // assume it is unique dst[ind] = 1; uint a = pairs[ind].x; uint b = pairs[ind].y; unsigned cur = ind; // check forward for(;;) { if(cur < 1) return; cur--; if(pairs[cur].x != a) return; if(pairs[cur].y == b) { dst[ind] = 0; return; } } } __global__ void compactUniquePairs_kernel(uint2 * dst, uint2 * pairs, uint * unique, uint * dstLoc, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= maxInd) return; if(unique[ind] > 0) { dst[dstLoc[ind]] = pairs[ind]; } } extern "C" { void broadphaseResetPairCounts(uint * dst, uint num) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(num, 512); dim3 grid(nblk, 1, 1); resetPairCounts_kernel<<< grid, block >>>(dst, num); } void broadphaseResetPairCache(uint2 * dst, uint num) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(num, 512); dim3 grid(nblk, 1, 1); resetPairCache_kernel<<< grid, block >>>(dst, num); } void broadphaseComputePairCountsSelfCollide(uint * dst, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * 
tetrahedronIndices) { int tpb = CudaBase::LimitNThreadPerBlock(20, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); computePairCountsSelfCollide_kernel<<< grid, block >>>(dst, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, tetrahedronIndices); } void broadphaseWritePairCacheSelfCollide(uint2 * dst, uint * starts, uint * counts, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint4 * tetrahedronIndices, unsigned queryIdx) { int tpb = CudaBase::LimitNThreadPerBlock(22, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); writePairCacheSelfCollide_kernel<<< grid, block >>>(dst, starts, counts, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, tetrahedronIndices, queryIdx); } void broadphaseUniquePair(uint * dst, uint2 * pairs, uint pairLength, uint bufLength) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(bufLength, 512); dim3 grid(nblk, 1, 1); uniquePair_kernel<<< grid, block >>>(dst, pairs, pairLength, bufLength); } void broadphaseCompactUniquePairs(uint2 * dst, uint2 * pairs, uint * unique, uint * loc, uint pairLength) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(pairLength, 512); dim3 grid(nblk, 1, 1); compactUniquePairs_kernel<<< grid, block >>>(dst, pairs, unique, loc, pairLength); } void broadphaseComputePairCountsSelfCollideExclusion(uint * dst, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, uint * exclusionIndices, uint * exclusionStarts) { int tpb = CudaBase::LimitNThreadPerBlock(18, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); 
computePairCountsSelfCollideExclusion_kernel<<< grid, block >>>(dst, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, exclusionIndices, exclusionStarts); } void cuBroadphase_writePairCacheSelfCollideExclusion(uint2 * dst, uint * locations, uint * starts, uint * counts, Aabb * boxes, uint numBoxes, int * rootNodeIndex, int2 * internalNodeChildIndex, Aabb * internalNodeAabbs, Aabb * leafNodeAabbs, KeyValuePair * mortonCodesAndAabbIndices, unsigned queryIdx, uint * exclusionIndices, uint * exclusionStarts) { int tpb = CudaBase::LimitNThreadPerBlock(20, 50); dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numBoxes, tpb); dim3 grid(nblk, 1, 1); writePairCacheSelfCollideExclusion_kernel<<< grid, block >>>(dst, locations, starts, counts, boxes, numBoxes, rootNodeIndex, internalNodeChildIndex, internalNodeAabbs, leafNodeAabbs, mortonCodesAndAabbIndices, queryIdx, exclusionIndices, exclusionStarts); } }
93fcb96430a4bb5bd6f666e1af21bd52a8bcfdf1.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float3, ushort3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
93fcb96430a4bb5bd6f666e1af21bd52a8bcfdf1.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float3, ushort3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
14c6fe8325712252a02743b49fa35d4568617697.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Shared.hh> #include <Number.h> #include <One.h> #include <Two.h> #include <Allocator.h> #define ARRAY_SIZE 8 __global__ void getData( Number** numbers, int* data, int N ) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if( idx < N ) { data[idx] = numbers[idx]->getNumber( ); } } int main( ) { /// Validation data int* dataHost = new int[ARRAY_SIZE]; int* dataDevice; hipMalloc( &dataDevice, sizeof( int ) * ARRAY_SIZE ); /// ONES // Allocate on the GPU to use the virtual function Allocator< One >* onesAllocator = new Allocator< One >( ARRAY_SIZE ); Number** ones = ( Number** )( onesAllocator->getObjects( )); // Run the kernel for the ones hipLaunchKernelGGL(( getData) , dim3(1), dim3(ARRAY_SIZE) , 0, 0, ones, dataDevice, ARRAY_SIZE ); // Get the data back hipMemcpy( dataHost, dataDevice, sizeof( int ) * ARRAY_SIZE, hipMemcpyDeviceToHost ); for( int i = 0; i < ARRAY_SIZE; i++ ) std::cout << "ones: " << dataHost[i] << std::endl; /// TWOS Allocator< Two >* twosAllocator = new Allocator<Two>( ARRAY_SIZE ); Number** twos = ( Number** )( twosAllocator->getObjects( )); // Run the kernel for the twos hipLaunchKernelGGL(( getData) , dim3(1), dim3(ARRAY_SIZE) , 0, 0, twos, dataDevice, ARRAY_SIZE ); // Get the data back hipMemcpy( dataHost, dataDevice, sizeof( int ) * ARRAY_SIZE, hipMemcpyDeviceToHost); for( int i = 0; i < ARRAY_SIZE; i++ ) std::cout << "twos: " << dataHost[i] << std::endl; return EXIT_SUCCESS; }
14c6fe8325712252a02743b49fa35d4568617697.cu
#include <Shared.hh> #include <Number.h> #include <One.h> #include <Two.h> #include <Allocator.h> #define ARRAY_SIZE 8 __global__ void getData( Number** numbers, int* data, int N ) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if( idx < N ) { data[idx] = numbers[idx]->getNumber( ); } } int main( ) { /// Validation data int* dataHost = new int[ARRAY_SIZE]; int* dataDevice; cudaMalloc( &dataDevice, sizeof( int ) * ARRAY_SIZE ); /// ONES // Allocate on the GPU to use the virtual function Allocator< One >* onesAllocator = new Allocator< One >( ARRAY_SIZE ); Number** ones = ( Number** )( onesAllocator->getObjects( )); // Run the kernel for the ones getData <<< 1, ARRAY_SIZE >>> ( ones, dataDevice, ARRAY_SIZE ); // Get the data back cudaMemcpy( dataHost, dataDevice, sizeof( int ) * ARRAY_SIZE, cudaMemcpyDeviceToHost ); for( int i = 0; i < ARRAY_SIZE; i++ ) std::cout << "ones: " << dataHost[i] << std::endl; /// TWOS Allocator< Two >* twosAllocator = new Allocator<Two>( ARRAY_SIZE ); Number** twos = ( Number** )( twosAllocator->getObjects( )); // Run the kernel for the twos getData <<< 1, ARRAY_SIZE >>> ( twos, dataDevice, ARRAY_SIZE ); // Get the data back cudaMemcpy( dataHost, dataDevice, sizeof( int ) * ARRAY_SIZE, cudaMemcpyDeviceToHost); for( int i = 0; i < ARRAY_SIZE; i++ ) std::cout << "twos: " << dataHost[i] << std::endl; return EXIT_SUCCESS; }
4b1b47df1935af687af02dcb18352c58412d0987.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zgetf2.cu, normal z -> c, Sun Nov 20 20:20:31 2016 */ #include "magma_internal.h" #define cgeru_bs 512 // 512 is max threads for 1.x cards void magma_cgetf2_swap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ); void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda, magma_queue_t ); // TODO: this function could be in .cpp file -- it has no CUDA code in it. /***************************************************************************//** CGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] dA COMPLEX array, dimension (LDDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[in] queue magma_queue_t Queue to execute in. 
@param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_getf2 *******************************************************************************/ extern "C" magma_int_t magma_cgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_queue_t queue, magma_int_t *info ) { #define dA(i, j) (dA + (i) + (j)*ldda) *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > cgeru_bs) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for (j=0; j < min_mn; j++) { hipDeviceSetCacheConfig( hipFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_icamax( m-j, dA(j,j), 1, queue ); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of dA since it is on GPU //if ( dA(jp, j) != 0.0) { hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_cgetf2_swap( n, dA, j, jp, ldda, queue ); } // Compute elements J+1:M of J-th column. if (j < m) { magma_cscal_cgeru( m-j, n-j, dA(j, j), ldda, queue ); } //} //else if (*info == 0) { // *info = j; //} } return *info; } // =========================================================================== // TODO: use standard BLAS magma_cswap? 
#define cswap_bs 64 /******************************************************************************/ __global__ void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx) { int id = blockIdx.x * cswap_bs + threadIdx.x; if (id < n) { magmaFloatComplex tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } /******************************************************************************/ void magma_cgetf2_swap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ) { /* cswap two row vectors: ith and jth */ dim3 threads( cswap_bs ); dim3 grid( magma_ceildiv( n, cswap_bs ) ); hipLaunchKernelGGL(( kernel_cswap) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, i, j, incx); } /******************************************************************************/ // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaFloatComplex shared_data[]; /******************************************************************************/ __global__ void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda) { magmaFloatComplex *shared_y = shared_data; int tid = blockIdx.x * cgeru_bs + threadIdx.x; magmaFloatComplex reg = MAGMA_C_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for (int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg; } } } /******************************************************************************/ void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { /* Specialized kernel that merges cscal and cgeru 1) cscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += 
alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads( cgeru_bs ); dim3 grid( magma_ceildiv( m, cgeru_bs ) ); size_t shared_size = sizeof(magmaFloatComplex)*(n); hipLaunchKernelGGL(( kernel_cscal_cgeru) , dim3(grid), dim3(threads), shared_size, queue->cuda_stream() , m, n, dA, ldda); }
4b1b47df1935af687af02dcb18352c58412d0987.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zgetf2.cu, normal z -> c, Sun Nov 20 20:20:31 2016 */ #include "magma_internal.h" #define cgeru_bs 512 // 512 is max threads for 1.x cards void magma_cgetf2_swap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ); void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda, magma_queue_t ); // TODO: this function could be in .cpp file -- it has no CUDA code in it. /***************************************************************************//** CGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] dA COMPLEX array, dimension (LDDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[in] queue magma_queue_t Queue to execute in. @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. 
The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_getf2 *******************************************************************************/ extern "C" magma_int_t magma_cgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_queue_t queue, magma_int_t *info ) { #define dA(i, j) (dA + (i) + (j)*ldda) *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > cgeru_bs) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for (j=0; j < min_mn; j++) { cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_icamax( m-j, dA(j,j), 1, queue ); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of dA since it is on GPU //if ( dA(jp, j) != 0.0) { cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_cgetf2_swap( n, dA, j, jp, ldda, queue ); } // Compute elements J+1:M of J-th column. if (j < m) { magma_cscal_cgeru( m-j, n-j, dA(j, j), ldda, queue ); } //} //else if (*info == 0) { // *info = j; //} } return *info; } // =========================================================================== // TODO: use standard BLAS magma_cswap? 
#define cswap_bs 64 /******************************************************************************/ __global__ void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx) { int id = blockIdx.x * cswap_bs + threadIdx.x; if (id < n) { magmaFloatComplex tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } /******************************************************************************/ void magma_cgetf2_swap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ) { /* cswap two row vectors: ith and jth */ dim3 threads( cswap_bs ); dim3 grid( magma_ceildiv( n, cswap_bs ) ); kernel_cswap <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, i, j, incx); } /******************************************************************************/ // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaFloatComplex shared_data[]; /******************************************************************************/ __global__ void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda) { magmaFloatComplex *shared_y = shared_data; int tid = blockIdx.x * cgeru_bs + threadIdx.x; magmaFloatComplex reg = MAGMA_C_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for (int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg; } } } /******************************************************************************/ void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { /* Specialized kernel that merges cscal and cgeru 1) cscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := 
A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads( cgeru_bs ); dim3 grid( magma_ceildiv( m, cgeru_bs ) ); size_t shared_size = sizeof(magmaFloatComplex)*(n); kernel_cscal_cgeru <<< grid, threads, shared_size, queue->cuda_stream() >>> (m, n, dA, ldda); }
bdd63ad3dde85fe161fa5a2cc507c3beb29f25f6.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <iostream> #include <random> #include <map> #include <functional> #include <algorithm> #include "cu-utils.h" #include "perf-measure.h" template <typename T> __global__ void addKernel(T* mA, T* mB, T* mC, int Nx, int Ny) { const unsigned ix = blockIdx.x * blockDim.x + threadIdx.x; const unsigned iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix < Nx && iy < Ny) { const unsigned idx = iy * Nx + ix; mC[idx] = mA[idx] + mB[idx]; } } struct IRngGen { virtual bool is_device_gen() = 0; virtual void generate_int (int*, int size) = 0; virtual void generate_float (float*, int size) = 0; virtual void generate_double(double*,int size) = 0; virtual ~IRngGen() {}; }; struct CudaRndGen : public IRngGen { hiprandGenerator_t m_gen; static hiprandRngType_t genName2Type(const char* name) { std::map<string, hiprandRngType_t> types = { {"DEFAULT", HIPRAND_RNG_PSEUDO_DEFAULT}, {"XORWOW", HIPRAND_RNG_PSEUDO_XORWOW}, {"MRG32K3A", HIPRAND_RNG_PSEUDO_MRG32K3A}, {"MTGP32",HIPRAND_RNG_PSEUDO_MTGP32}, {"MT19937",HIPRAND_RNG_PSEUDO_MT19937}, {"PHILOX4", HIPRAND_RNG_PSEUDO_PHILOX4_32_10}, {"QUASI_DEFAULT", HIPRAND_RNG_QUASI_DEFAULT}, {"QUASI_SOBOL32", HIPRAND_RNG_QUASI_SOBOL32}, {"SCRAMBLED_SOBOL32", HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32} }; auto it = types.find(name); if (it != types.end()) return it->second; else return HIPRAND_RNG_PSEUDO_DEFAULT; } CudaRndGen(const char* type) { const auto status = hiprandCreateGenerator(&m_gen, genName2Type(type)); if (status != HIPRAND_STATUS_SUCCESS) { throw "hiprandCreateGenerator failed"; } } ~CudaRndGen() { hiprandDestroyGenerator(m_gen); } bool is_device_gen() override { return true; } void generate_int(int* ptr, int size) override { const auto status = hiprandGenerate(m_gen, (unsigned int*)ptr, size); if (status != HIPRAND_STATUS_SUCCESS) { throw "hiprandGenerate failed"; } } void generate_float(float* ptr, int size) override { 
const auto status = hiprandGenerateUniform(m_gen, ptr, size); if (status != HIPRAND_STATUS_SUCCESS) { throw "hiprandGenerateUniform failed"; } } void generate_double(double* ptr, int size) override { const auto status = hiprandGenerateUniformDouble(m_gen, ptr, size); if (status != HIPRAND_STATUS_SUCCESS) { throw "hiprandGenerateUniformDouble failed"; } } }; struct CpuRndGen : public IRngGen { std::random_device dev; std::mt19937 rng; CpuRndGen() : rng(dev()) {} bool is_device_gen() override { return false; } void generate_int(int* ptr, int size) override { std::uniform_int_distribution<int> dist(-10, 10); while (size-- > 0) { *ptr++ = dist(rng); } } void generate_float(float* ptr, int size) override { std::uniform_real_distribution<float> dist(-1, 1); while (size-- > 0) { *ptr++ = dist(rng); } } void generate_double(double* ptr, int size) override { std::uniform_real_distribution<float> dist(-1, 1); while (size-- > 0) { *ptr++ = dist(rng); } } }; template <typename T> __host__ void doMatrixAdd(int one_dim_size, int blockx, int blocky, int dimx, int dimy, std::function<void(T*,int)> rng, bool rng_on_device) { const auto tot_size = one_dim_size * one_dim_size; auto host_a = std::make_unique<T[]>(tot_size); auto host_b = std::make_unique<T[]>(tot_size); auto host_c = std::make_unique<T[]>(tot_size); Measurements mm; mm.start(); std::cout << "allocating device memory ..."; auto dev_a = cu_make_unique<T>(tot_size); auto dev_b = cu_make_unique<T>(tot_size); auto dev_c = cu_make_unique<T>(tot_size); std::cout << " done " << mm.elapsed() << std::endl; if (rng_on_device) { mm.start(); std::cout << "generating random numbers on gpu ..."; rng(dev_a.get(), tot_size); rng(dev_b.get(), tot_size); cu_device_synchronize(); std::cout << " done " << mm.elapsed() << std::endl; } else { mm.start(); std::cout << "generating random numbers on cpu ..."; rng(host_a.get(), tot_size); rng(host_b.get(), tot_size); std::cout << " done " << mm.elapsed() << std::endl; mm.start(); std::cout << 
"copying data to device ..."; cu_copy_to_device(host_b, dev_b, tot_size); cu_copy_to_device(host_c, dev_c, tot_size); cu_device_synchronize(); std::cout << " done " << mm.elapsed() << std::endl; } mm.start(); addKernel<T> << <dim3(blockx, blocky), dim3(dimx, dimy) >> > (dev_a.get(), dev_b.get(), dev_c.get(), one_dim_size, one_dim_size); if (auto status = hipGetLastError(); status != hipSuccess) { std::cout << "kernel launch error " << hipGetErrorString(status) << std::endl; } cu_device_synchronize(); std::cout << "kernel exec time : " << mm.elapsed() << std::endl; cu_copy_to_host(dev_c, host_c, tot_size); } IRngGen* makeRndGen(const char* _gen_type) { string gen_type(_gen_type); std::transform(gen_type.begin(), gen_type.end(), gen_type.begin(), [](auto c) { return std::toupper(c);}); if (gen_type == "CPU") { return new CpuRndGen(); } else { return new CudaRndGen(gen_type.c_str()); } } int matrixAdd(int argc, char** argv) { const int oneDimMatrixSize = argc > 1 ? 1 << std::atoi(argv[0]) : 1 << 14; const int dimx = argc > 2 ? std::atoi(argv[1]) : 32; const int dimy = argc > 3 ? std::atoi(argv[2]) : 32; const char* type = argc > 4 ? argv[3] : "float"; const char* RNG_type = argc > 5 ? 
argv[4] : "CPU"; const int blockx = (oneDimMatrixSize + dimx - 1) / dimx; const int blocky = (oneDimMatrixSize + dimy - 1) / dimy; auto rng = makeRndGen(RNG_type); printf("matrix size (%d,%d) grid (%d,%d) block (%d,%d)\n", oneDimMatrixSize, oneDimMatrixSize, blockx, blocky, dimx, dimy); if (0 == strcmp(type, "float")) { auto gen = [&](float* ptr, int size) { rng->generate_float(ptr, size); }; doMatrixAdd<float>(oneDimMatrixSize,blockx,blocky,dimx,dimy, gen, rng->is_device_gen()); } else if (0 == strcmp(type, "double")) { auto gen = [&](double* ptr, int size) { rng->generate_double(ptr, size); }; doMatrixAdd<double>(oneDimMatrixSize, blockx, blocky, dimx, dimy, gen, rng->is_device_gen()); } else if (0 == strcmp(type, "int")) { auto gen = [&](int* ptr, int size) { rng->generate_int(ptr, size); }; doMatrixAdd<int>(oneDimMatrixSize, blockx, blocky, dimx, dimy, gen, rng->is_device_gen()); } delete rng; return 0; }
bdd63ad3dde85fe161fa5a2cc507c3beb29f25f6.cu
#include <cuda.h> #include <curand.h> #include <iostream> #include <random> #include <map> #include <functional> #include <algorithm> #include "cu-utils.h" #include "perf-measure.h" template <typename T> __global__ void addKernel(T* mA, T* mB, T* mC, int Nx, int Ny) { const unsigned ix = blockIdx.x * blockDim.x + threadIdx.x; const unsigned iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix < Nx && iy < Ny) { const unsigned idx = iy * Nx + ix; mC[idx] = mA[idx] + mB[idx]; } } struct IRngGen { virtual bool is_device_gen() = 0; virtual void generate_int (int*, int size) = 0; virtual void generate_float (float*, int size) = 0; virtual void generate_double(double*,int size) = 0; virtual ~IRngGen() {}; }; struct CudaRndGen : public IRngGen { curandGenerator_t m_gen; static curandRngType_t genName2Type(const char* name) { std::map<string, curandRngType_t> types = { {"DEFAULT", CURAND_RNG_PSEUDO_DEFAULT}, {"XORWOW", CURAND_RNG_PSEUDO_XORWOW}, {"MRG32K3A", CURAND_RNG_PSEUDO_MRG32K3A}, {"MTGP32",CURAND_RNG_PSEUDO_MTGP32}, {"MT19937",CURAND_RNG_PSEUDO_MT19937}, {"PHILOX4", CURAND_RNG_PSEUDO_PHILOX4_32_10}, {"QUASI_DEFAULT", CURAND_RNG_QUASI_DEFAULT}, {"QUASI_SOBOL32", CURAND_RNG_QUASI_SOBOL32}, {"SCRAMBLED_SOBOL32", CURAND_RNG_QUASI_SCRAMBLED_SOBOL32} }; auto it = types.find(name); if (it != types.end()) return it->second; else return CURAND_RNG_PSEUDO_DEFAULT; } CudaRndGen(const char* type) { const auto status = curandCreateGenerator(&m_gen, genName2Type(type)); if (status != CURAND_STATUS_SUCCESS) { throw "curandCreateGenerator failed"; } } ~CudaRndGen() { curandDestroyGenerator(m_gen); } bool is_device_gen() override { return true; } void generate_int(int* ptr, int size) override { const auto status = curandGenerate(m_gen, (unsigned int*)ptr, size); if (status != CURAND_STATUS_SUCCESS) { throw "curandGenerate failed"; } } void generate_float(float* ptr, int size) override { const auto status = curandGenerateUniform(m_gen, ptr, size); if (status != CURAND_STATUS_SUCCESS) { 
throw "curandGenerateUniform failed"; } } void generate_double(double* ptr, int size) override { const auto status = curandGenerateUniformDouble(m_gen, ptr, size); if (status != CURAND_STATUS_SUCCESS) { throw "curandGenerateUniformDouble failed"; } } }; struct CpuRndGen : public IRngGen { std::random_device dev; std::mt19937 rng; CpuRndGen() : rng(dev()) {} bool is_device_gen() override { return false; } void generate_int(int* ptr, int size) override { std::uniform_int_distribution<int> dist(-10, 10); while (size-- > 0) { *ptr++ = dist(rng); } } void generate_float(float* ptr, int size) override { std::uniform_real_distribution<float> dist(-1, 1); while (size-- > 0) { *ptr++ = dist(rng); } } void generate_double(double* ptr, int size) override { std::uniform_real_distribution<float> dist(-1, 1); while (size-- > 0) { *ptr++ = dist(rng); } } }; template <typename T> __host__ void doMatrixAdd(int one_dim_size, int blockx, int blocky, int dimx, int dimy, std::function<void(T*,int)> rng, bool rng_on_device) { const auto tot_size = one_dim_size * one_dim_size; auto host_a = std::make_unique<T[]>(tot_size); auto host_b = std::make_unique<T[]>(tot_size); auto host_c = std::make_unique<T[]>(tot_size); Measurements mm; mm.start(); std::cout << "allocating device memory ..."; auto dev_a = cu_make_unique<T>(tot_size); auto dev_b = cu_make_unique<T>(tot_size); auto dev_c = cu_make_unique<T>(tot_size); std::cout << " done " << mm.elapsed() << std::endl; if (rng_on_device) { mm.start(); std::cout << "generating random numbers on gpu ..."; rng(dev_a.get(), tot_size); rng(dev_b.get(), tot_size); cu_device_synchronize(); std::cout << " done " << mm.elapsed() << std::endl; } else { mm.start(); std::cout << "generating random numbers on cpu ..."; rng(host_a.get(), tot_size); rng(host_b.get(), tot_size); std::cout << " done " << mm.elapsed() << std::endl; mm.start(); std::cout << "copying data to device ..."; cu_copy_to_device(host_b, dev_b, tot_size); cu_copy_to_device(host_c, dev_c, 
tot_size); cu_device_synchronize(); std::cout << " done " << mm.elapsed() << std::endl; } mm.start(); addKernel<T> << <dim3(blockx, blocky), dim3(dimx, dimy) >> > (dev_a.get(), dev_b.get(), dev_c.get(), one_dim_size, one_dim_size); if (auto status = cudaGetLastError(); status != cudaSuccess) { std::cout << "kernel launch error " << cudaGetErrorString(status) << std::endl; } cu_device_synchronize(); std::cout << "kernel exec time : " << mm.elapsed() << std::endl; cu_copy_to_host(dev_c, host_c, tot_size); } IRngGen* makeRndGen(const char* _gen_type) { string gen_type(_gen_type); std::transform(gen_type.begin(), gen_type.end(), gen_type.begin(), [](auto c) { return std::toupper(c);}); if (gen_type == "CPU") { return new CpuRndGen(); } else { return new CudaRndGen(gen_type.c_str()); } } int matrixAdd(int argc, char** argv) { const int oneDimMatrixSize = argc > 1 ? 1 << std::atoi(argv[0]) : 1 << 14; const int dimx = argc > 2 ? std::atoi(argv[1]) : 32; const int dimy = argc > 3 ? std::atoi(argv[2]) : 32; const char* type = argc > 4 ? argv[3] : "float"; const char* RNG_type = argc > 5 ? 
argv[4] : "CPU"; const int blockx = (oneDimMatrixSize + dimx - 1) / dimx; const int blocky = (oneDimMatrixSize + dimy - 1) / dimy; auto rng = makeRndGen(RNG_type); printf("matrix size (%d,%d) grid (%d,%d) block (%d,%d)\n", oneDimMatrixSize, oneDimMatrixSize, blockx, blocky, dimx, dimy); if (0 == strcmp(type, "float")) { auto gen = [&](float* ptr, int size) { rng->generate_float(ptr, size); }; doMatrixAdd<float>(oneDimMatrixSize,blockx,blocky,dimx,dimy, gen, rng->is_device_gen()); } else if (0 == strcmp(type, "double")) { auto gen = [&](double* ptr, int size) { rng->generate_double(ptr, size); }; doMatrixAdd<double>(oneDimMatrixSize, blockx, blocky, dimx, dimy, gen, rng->is_device_gen()); } else if (0 == strcmp(type, "int")) { auto gen = [&](int* ptr, int size) { rng->generate_int(ptr, size); }; doMatrixAdd<int>(oneDimMatrixSize, blockx, blocky, dimx, dimy, gen, rng->is_device_gen()); } delete rng; return 0; }
48b7158346d3a430e8a7429d3dd37d368f0ea11b.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "NativeOpExecutioner.h" #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <PointersManager.h> //#include <sys/time.h> #include <hiprand/hiprand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport 
= false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] 
= MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], 
extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } 
////////////////////////////////////////////////////////////////////////
// Broadcast op with boolean output along the given dimension(s).
// TAD (tensor-along-dimension) shape/offset pointers for both X and Z are
// taken from extraPointers[10..13]; the dimension vector comes in both host
// (hDimension, used only for its length) and device (dDimension) form.
void execBroadcastBool(Nd4jPointer *extraPointers,
                       int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *hY, Nd4jLong *hYShapeInfo,
                       void *dY, Nd4jLong *dYShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo,
                       void *hDimension, Nd4jLong *hDimensionShape,
                       void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        // Historical extraPointers layout kept for reference:
        //Nd4jLong *tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
        //Nd4jLong *tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[1]);
        //Nd4jLong *tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[2]);
        //Nd4jLong *tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[3]);

        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);   // NOTE(review): unused below
        auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
        auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
        auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execBroadcastBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY,
                                               dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension,
                                               dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ,
                                               tadOffsetsZ);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param dY
 * @param dYShapeInfo
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
// Numeric broadcast op along the given dimension(s); same TAD wiring as
// execBroadcastBool above.
void execBroadcast( Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *hY, Nd4jLong *hYShapeInfo,
                    void *dY, Nd4jLong *dYShapeInfo,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo,
                    void *hDimension, Nd4jLong *hDimensionShape,
                    void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        // NOTE(review): stream/hTADShapeInfo and the three dtype lookups below are
        // computed but not used (the dtypes feed only the debug printf path).
        hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);

        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
        auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
        auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
        auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("F3 opNum:[%i]\n", opNum);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execBroadcast(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY,
                                           dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength,
                                           tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
////////////////////////////////////////////////////////////////////////
// Full reduction of X to a scalar with float-typed output.
// (Body continues in the next region of this file.)
void execReduceFloat(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ,
                                                   hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Full reduction of X to a scalar where Z keeps X's data type.
void execReduceSame(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceSameScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ,
                                                  hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional same-type reduction: reduces X along the dimensions given in
// hDimension/dDimension. TAD shape/offsets are resolved (and cached) via
// ConstantTadHelper from the host-side dimension vector.
void execReduceSame2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo,
                                            dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(),
                                            tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional reduction producing INT64 output (e.g. CountNonZero-style ops);
// same TAD resolution pattern as execReduceSame2.
void execReduceLong2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceLong(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo,
                                            dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(),
                                            tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Scalar (whole-array) reduction with INT64 output. Unlike the wrappers above,
// this one launches the kernel directly via BUILD_DOUBLE_SELECTOR rather than
// delegating to NativeOpExecutioner. (Body continues in the next region.)
void execReduceLong(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);   // NOTE(review): unused below
        auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("LF7 opNum:[%i]\n", opNum);

        auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        // Long reductions must write INT64 output.
        if (zType != nd4j::DataType::INT64)
            throw datatype_exception::build("execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

        auto xLength = shape::length(hXShapeInfo);
        auto blockWidth = 256;
        auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
        dim3 launchDims(numBlocks, blockWidth, 32768);  // grid, block, shared-mem bytes

        // NOTE(review): the second shape-info argument after dZShapeInfo is
        // hXShapeInfo here, while the otherwise-parallel execReduceBool below
        // passes hZShapeInfo in that slot — looks like a copy/paste slip; confirm
        // against ReduceLongFunction::execReduceScalar's parameter list.
        BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
                              ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
                                                 dZ, dZShapeInfo, hXShapeInfo, nullptr, 0, reductionPointer,
                                                 dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional reduction with BOOL output; TADs resolved via ConstantTadHelper.
void execReduceBool2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo,
                                            dZ, dZShapeInfo, dimension, dimensionLength,
                                            tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Scalar (whole-array) reduction with BOOL output; direct kernel launch via
// BUILD_DOUBLE_SELECTOR, mirroring execReduceLong above.
void execReduceBool(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);   // NOTE(review): unused below
        auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("BF7 opNum:[%i]\n", opNum);

        auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        if (zType != nd4j::DataType::BOOL)
            throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");

        auto xLength = shape::length(hXShapeInfo);
        auto blockWidth = 256;
        auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
        dim3 launchDims(numBlocks, blockWidth, 32768);  // grid, block, shared-mem bytes

        BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
                              ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
                                                 dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer,
                                                 dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
////////////////////////////////////////////////////////////////////////
// Dimensional index-reduction (e.g. argmax/argmin along dimensions).
void execIndexReduce(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execIndexReduce(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo,
                                             dZ, dZShapeInfo, dimension, dimensionLength,
                                             tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
////////////////////////////////////////////////////////////////////////
// Dimensional reduction with float-typed output; TADs via ConstantTadHelper.
void execReduceFloat2(Nd4jPointer *extraPointers,
                      int opNum,
                      void *hX, Nd4jLong *hXShapeInfo,
                      void *dX, Nd4jLong *dXShapeInfo,
                      void *extraParams,
                      void *hZ, Nd4jLong *hZShapeInfo,
                      void *dZ, Nd4jLong *dZShapeInfo,
                      void *hDimension, Nd4jLong *hDimensionShape,
                      void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo,
                                             dZ, dZShapeInfo, dimension, dimensionLength,
                                             tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 */
////////////////////////////////////////////////////////////////////////
// Whole-array index reduction to a scalar (e.g. global argmax).
void execIndexReduceScalar(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo){
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ,
                                                   hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Element-wise transform where Z keeps X's data type.
// (Body continues in the next region of this file.)
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo,
                       void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                         extraPointers[0] : nullptr);
        // NOTE(review): tadOffsets is read from extraPointers[1], yet the same
        // slot is passed to LaunchContext below as the stream — one of the two
        // usages looks wrong; confirm the extraPointers layout for this entry point.
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                       extraPointers[1] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ,
                                               dZShapeInfo, extraParams, tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Element-wise transform with boolean output; same extraPointers usage
// (and the same [1]-as-both-offsets-and-stream pattern) as execTransformSame.
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo,
                       void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                         extraPointers[0] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                       extraPointers[1] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ,
                                               dZShapeInfo, extraParams, tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Type-agnostic element-wise transform (e.g. casts/copies). Builds the
// LaunchContext from an explicit stream plus extraPointers[4..6]; passes no
// TAD information (nullptr, nullptr).
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
                      void *hX, Nd4jLong *hXShapeInfo,
                      void *dX, Nd4jLong *dXShapeInfo,
                      void *hZ, Nd4jLong *hZShapeInfo,
                      void *dZ, Nd4jLong *dZShapeInfo,
                      void *extraParams) {
    try {
        auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
        // NOTE(review): this reinterprets the pointer slot itself as a stream
        // reference (cast of extraPointers[4], not *extraPointers[4]) — verify
        // this matches what LaunchContext expects for its special stream.
        auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]);
        LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
                         reinterpret_cast<int *>(extraPointers[6]));

        NativeOpExecutioner::execTransformAny(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ,
                                              dZShapeInfo, extraParams, nullptr, nullptr);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Strict (floating-point-only) element-wise transform; TAD shape/offsets come
// from extraPointers[10]/[11] here, unlike the [0]/[1] used by Same/Bool above.
// (Body continues in the next region of this file.)
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
                         void *hX, Nd4jLong *hXShapeInfo,
                         void *dX, Nd4jLong *dXShapeInfo,
                         void *hZ, Nd4jLong *hZShapeInfo,
                         void *dZ, Nd4jLong *dZShapeInfo,
                         void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                         extraPointers[10] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                       extraPointers[11] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformStrict(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ,
                                                 dZShapeInfo, extraParams, tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Element-wise transform with float-typed output; TADs from extraPointers[10]/[11].
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
                        void *hX, Nd4jLong *hXShapeInfo,
                        void *dX, Nd4jLong *dXShapeInfo,
                        void *hZ, Nd4jLong *hZShapeInfo,
                        void *dZ, Nd4jLong *dZShapeInfo,
                        void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                         extraPointers[10] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ?
                                                       extraPointers[11] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ,
                                                dZShapeInfo, extraParams, tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Probes pairwise peer-to-peer accessibility between all visible devices and
// records the result in the file-level supportedP2P flag. With a single device,
// P2P is reported as supported by definition.
void checkP2P() {
    int curDevice = 0;

    hipGetDevice(&curDevice);

    int devCnt = 0;
    hipGetDeviceCount(&devCnt);

    // NOTE(review): `curDevice < 0 && curDevice > devCnt` can never be true —
    // almost certainly `||` was intended (compare clamp logic elsewhere).
    if (curDevice < 0 && curDevice > devCnt)
        curDevice = 0;

    bool tempSupport = true;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                hipSetDevice(dX);

                hipDeviceCanAccessPeer(&canAccess, dX , dY);

                if (!canAccess) {
                    tempSupport = false;
                    // NOTE(review): this break only exits the inner loop; the
                    // outer loop keeps probing even though the answer is final.
                    break;
                }
            }
        }

        supportedP2P = tempSupport;

        hipSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

// Enables or disables peer access between every accessible device pair, then
// restores the originally-current device and records the new state in allowedP2P.
void enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;

    hipGetDevice(&curDevice);

    int devCnt = 0;
    hipGetDeviceCount(&devCnt);

    // NOTE(review): same always-false `&&` condition as in checkP2P above.
    if (curDevice < 0 && curDevice > devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                hipSetDevice(dX);

                hipDeviceCanAccessPeer(&canAccess, dX , dY);

                if (canAccess) {
                    if (enable) {
                        hipDeviceEnablePeerAccess(dY, 0);
                    } else {
                        hipDeviceDisablePeerAccess(dY);
                    }
                } else {
                    if (nd4j::Environment::getInstance()->isVerbose())
                        printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
                }
            }
        }

        hipSetDevice(curDevice);
    }

    allowedP2P = enable;

    // NOTE(review): redundant when devCnt > 1 (already restored above).
    hipSetDevice(curDevice);
}

// Returns the cached result of checkP2P().
bool isP2PAvailable() {
    return supportedP2P;
}

// One-time device discovery: caches hipDeviceProp_t for every device, bumps the
// per-thread stack limit to 4 KiB, probes P2P, and enables it when supported.
void initializeDevicesAndFunctions() {
    try {
        int devCnt = 0;
        hipGetDeviceCount(&devCnt);
        deviceProperties = new hipDeviceProp_t[devCnt];
        for (int i = 0; i < devCnt; i++) {
            hipSetDevice(i);
            hipGetDeviceProperties(&deviceProperties[i], i);

            hipDeviceSetLimit(hipLimitStackSize, 4096);
        }

        hipSetDevice(0);

        checkP2P();

        // enabling p2p gpu access if it's supported
        if (supportedP2P && devCnt > 1)
            enableP2P(allowedP2P);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Hands host-provided BLAS function pointers to the BlasHelper singleton.
void initializeFunctions(Nd4jPointer *functions) {
    nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
    /*
    hipblasSgemv = (CublasSgemv)functions[0];
    hipblasDgemv = (CublasDgemv)functions[1];
    hipblasHgemm = (CublasHgemm)functions[2];
    hipblasSgemm = (CublasSgemm)functions[3];
    hipblasDgemm = (CublasDgemm)functions[4];
    cublasSgemmEx = (CublasSgemmEx)functions[5];
    hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
    hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
    hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
    */
}

/**
 * This method acquires memory
 * chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
// Allocates pinned (page-locked) host memory; 8 extra bytes of headroom are
// requested on top of memorySize. Failures are reported via the error
// reference; the (then-indeterminate) pointer is still returned.
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
    Nd4jPointer pointer;
    // hipHostMallocMapped |hipHostMallocPortable
    auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault);
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed");
    }

    return reinterpret_cast<int8_t*>(pointer);
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
 * @param flags optional parameter
 */
// NOTE(review): deviceId is accepted but not used — allocation lands on the
// currently active device.
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
    Nd4jPointer pointer;
    auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed");
    }

    return reinterpret_cast<int8_t*>(pointer);
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
// Returns 1 unconditionally; errors are only recorded on the error reference.
int freeHost(Nd4jPointer pointer) {
    auto res = hipHostFree(reinterpret_cast<void *>(pointer));
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed");
    }

    return 1L;
}

/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param ptrToDeviceId pointer to deviceId.
 */
// Returns 1 on success, 0 otherwise; error code 1 is deliberately tolerated.
int freeDevice(Nd4jPointer pointer, int deviceId) {
    auto res = hipFree(reinterpret_cast<void *>(pointer));

    // we're intentionally skipping
    if (res != 0 && res != 1) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed");
    }

    return res == 0 ? 1L : 0L;
}

// No-op on this backend: contexts are managed elsewhere.
Nd4jPointer createContext() {
    return 0L;
}

// Heap-allocates and creates a new HIP stream; returns the stream pointer.
// NOTE(review): ownership is transferred to the caller; the `new` has no
// matching delete in this file.
Nd4jPointer createStream() {
    auto stream = new hipStream_t();
    auto dZ = hipStreamCreate(stream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamCreate failed");
    }

    return stream;
}

// Allocates storage for a HIP event and creates it with timing disabled
// (hipEventDisableTiming avoids the timing-sync overhead).
Nd4jPointer createEvent() {
    Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
    CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t));

    auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed");
    }

    return nativeEvent;
}

// Records the given event on the given stream.
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
    auto pStream = reinterpret_cast<hipStream_t *>(stream);

    auto dZ = hipEventRecord(*pEvent, *pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed");
    }

    return 1;
}

// Switches the calling thread's current device via the AffinityManager.
int setDevice(int deviceId) {
    AffinityManager::setCurrentDevice(deviceId);
    return 1;
}

// Free memory (bytes) on the currently active device.
Nd4jLong getDeviceFreeMemoryDefault() {
    size_t memFree = 0;
    size_t memTotal = 0;

    hipMemGetInfo(&memFree, &memTotal);

    return (Nd4jLong) memFree;
}

// Free memory (bytes) on the requested device; temporarily switches devices
// when needed and restores the original afterwards.
// (Body continues in the next region of this file.)
Nd4jLong getDeviceFreeMemory(int device) {
    int orig = -1;

    hipGetDevice(&orig);

    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    hipMemGetInfo(&memFree, &memTotal);

    // Restore the caller's device selection.
    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }

    return (Nd4jLong) memFree;
}

// Total memory (bytes) on the requested device; same switch/restore pattern
// as getDeviceFreeMemory.
Nd4jLong getDeviceTotalMemory(int device) {
    int orig = -1;

    hipGetDevice(&orig);

    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;

    hipMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }

    return (Nd4jLong) memTotal;
}

// "Synchronous" copy is implemented as the async variant on the caller's
// stream; the caller is responsible for any synchronization.
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

// Async memcpy on the stream passed via `reserved`. `flags` selects direction:
// 0=H2H, 1=H2D, 2=D2H, 3=D2D; anything else is rejected.
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto pStream = reinterpret_cast<hipStream_t *>(reserved);

    hipMemcpyKind kind;

    //nd4j::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");

    switch (flags) {
        case 0: {
            kind = hipMemcpyHostToHost;
        }
            break;
        case 1: {
            kind = hipMemcpyHostToDevice;
        }
            break;
        case 2: {
            kind = hipMemcpyDeviceToHost;
        }
            break;
        case 3: {
            kind = hipMemcpyDeviceToDevice;
        }
            break;
        default: {
            // NOTE(review): "UNDEFNED" typo is in the runtime message; left as-is
            // here since changing it alters observable behavior.
            nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
            return 0;
        }
    }

    auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
    //auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
    if (dZ != 0) {
        printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
        fflush(stdout);
        fflush(stderr);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed");
    }

    return 1;
}

// Blocking byte-wise memset of device memory.
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemset failed");
    }

    return 1;
}

// Async byte-wise memset on the stream passed via `reserved`.
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto pStream = reinterpret_cast<hipStream_t *>(reserved);

    auto dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemsetAsync failed");
    }

    return 1;
}

// Destroys an event created by createEvent.
// NOTE(review): the malloc'ed holder from createEvent is not freed here.
int destroyEvent(Nd4jPointer event) {
    auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
    auto dZ = hipEventDestroy(*pEvent);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed");
    }

    return 1;
}

// Blocks the host until all work queued on the stream has completed.
int streamSynchronize(Nd4jPointer stream) {
    auto pStream = reinterpret_cast<hipStream_t *>(stream);

    auto dZ = hipStreamSynchronize(*pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamSynchronize failed");
    }

    return 1L;
}

// Blocks the host until the event has been reached.
int eventSynchronize(Nd4jPointer event) {
    auto pEvent = reinterpret_cast<hipEvent_t *>(&event);

    auto dZ = hipEventSynchronize(*pEvent);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed");
    }

    return 1L;
}

// Number of visible HIP devices.
int getAvailableDevices() {
    int devCnt = 0;
    hipGetDeviceCount(&devCnt);
    return devCnt;
}

// Toggles global debug mode. (Body continues in the next region of this file.)
void enableDebugMode(bool reallyEnable) {
nd4j::Environment::getInstance()->setDebug(reallyEnable); } void setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int ompGetMaxThreads() { return maxThreads; } int ompGetNumThreads() { return maxThreads; } void setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int getDeviceMajor(int device) { return deviceProperties[device].major; } int getDeviceMinor(int device) { return deviceProperties[device].minor; } const char * getDeviceName(int device) { return deviceProperties[device].name; } void specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { try { BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), nd4j::SpecialMethods, ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo), LIBND4J_TYPES); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * This method saves */ nd4j::TadPack* tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength) { try { auto pack = new TadPack(); *pack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength); return pack; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong* getPrimaryShapeInfo(nd4j::TadPack* pack) { return pack->primaryShapeInfo(); } Nd4jLong* getPrimaryOffsets(nd4j::TadPack* pack) { return pack->primaryOffsets(); } 
Nd4jLong* getSpecialShapeInfo(nd4j::TadPack* pack) { return pack->specialShapeInfo(); } Nd4jLong* getSpecialOffsets(nd4j::TadPack* pack) { return pack->specialOffsets(); } Nd4jLong getNumberOfTads(nd4j::TadPack* pack) { return pack->numberOfTads(); } int getShapeInfoLength(nd4j::TadPack* pack) { return pack->shapeInfoLength(); } int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; } auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed"); } return 1; } Nd4jPointer getConstantSpace() { Nd4jPointer dConstAddr; hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed"); } return dConstAddr; } void pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, 
n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) 
failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) { try { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) 
failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(256, 512, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } bool isExperimentalEnabled() { return nd4j::Environment::getInstance()->isExperimentalBuild(); } void setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int getDevice() { return nd4j::AffinityManager::currentDeviceId(); } void setElementThreshold(int num) { // this is no-op for CUDA } void setTADThreshold(int num) { // this is no-op for CUDA } //////////////////////////////////////////////////////////////////////// void execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext 
lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Tad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = 
nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); auto tadLength = shape::length(tadPack.primaryShapeInfo()); auto yLength = shape::length(hYShapeInfo); auto xLength = shape::length(hXShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); if (tadLength == yLength || tadLength == xLength) { // nd4j_printf("== way\n",""); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } else NativeOpExecutioner::execReduce3TAD(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, yTadOffsets, yTadOnlyShapeInfo, yTadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3Scalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } 
//////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalars, hScalarShapeInfo, dScalars, dScalarShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !isExperimentalEnabled()) throw nd4j::datatype_exception::build("execScalar both 
operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { try { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync 
here // hipStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); // update rng 
state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { 
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed"); } void encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParamsVals, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = 
nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception 
&e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? 
LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTad(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 0; } } void decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { try { return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getResultWrapperSize(nd4j::graph::ResultWrapper* ptr) { return ptr->size(); } Nd4jPointer getResultWrapperPointer(nd4j::graph::ResultWrapper* ptr) { return ptr->pointer(); } const char* getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e + numInputShapes]; auto array = new nd4j::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, 
inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(nd4j::ShapeList* list) { return list->size(); } Nd4jLong* getShape(nd4j::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs]; inputs[e] = new nd4j::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. 
bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); auto context = reinterpret_cast<Context *>(opContext); auto result = op->execute(context); auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream()); if (res != 0) throw nd4j::cuda_exception::build("customOp execution failed", res); for (auto v:context->fastpath_in()) { if (!v->isEmpty()) v->syncToDevice(); } for (auto v:context->fastpath_out()) { if (!v->isEmpty()) v->syncToDevice(); } return result; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { try { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong 
*>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(nd4j::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(nd4j::graph::VariablesSet* set) { return set->status(); } nd4j::graph::Variable* getVariable(nd4j::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(nd4j::graph::Variable* variable) { return variable->id(); } int getVariableIndex(nd4j::graph::Variable* variable) { return variable->index(); } const char* getVariableName(nd4j::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong* getVariableShape(nd4j::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(nd4j::graph::Variable* 
variable) { return variable->getNDArray()->buffer(); } int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { try { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void deleteCharArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<char *>(pointer); delete[] ptr; } void deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } void deleteVariablesSet(nd4j::graph::VariablesSet* pointer) { delete pointer; } void deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); //list->destroy(); delete list; } const char* getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. 
while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, 
inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, 
dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { 
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, 
dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else 
if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_buffer; } void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong 
*yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, int* hIindexes, int* dIndexes) { try { auto stream = 
reinterpret_cast<hipStream_t *>(extraPointers[1]); nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIndexes), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<nd4j::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); nd4j::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { hipStream_t stream; hipStreamCreate(&stream); tryPointerKernel << < 256, 512, len + 64, stream >> > (p, len); auto e = hipStreamSynchronize(stream); if (e != 0) throw nd4j::cuda_exception::build("tryPointer failed", e); hipStreamDestroy(stream); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int 
dataTypeFromNpyHeader(void *header) { return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } nd4j::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, nd4j::DataType dtype, char order, Nd4jLong ews, bool empty) { try { auto buffer = new ConstantDataBuffer(); *buffer = nd4j::ConstantShapeHelper::getInstance()->bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty)); return buffer; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteShapeBuffer(nd4j::ConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(nd4j::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion, Environment::getInstance()->_blasPatchVersion, major, minor, build); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(152); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch"); } return result; } nd4j::ConstantDataBuffer* constantBufferLong(nd4j::DataType dtype, Nd4jLong *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBufferDouble(nd4j::DataType dtype, double *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBuffer(nd4j::DataType dtype, nd4j::ConstantDescriptor *descriptor) { return nd4j::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype); } Nd4jPointer getConstantDataBufferPrimary(nd4j::ConstantDataBuffer* dbf) { return dbf->primary(); } Nd4jPointer getConstantDataBufferSpecial(nd4j::ConstantDataBuffer* dbf) { return dbf->special(); } Nd4jLong getConstantDataBufferLength(nd4j::ConstantDataBuffer* dbf) { return dbf->length(); } Nd4jLong getConstantDataBufferSizeOf(nd4j::ConstantDataBuffer* dbf) { return dbf->sizeOf(); } nd4j::graph::Context* createGraphContext(int nodeId) { return new nd4j::graph::Context(nodeId); } nd4j::graph::RandomGenerator* getGraphContextRandomGenerator(nd4j::graph::Context* ptr) { return &ptr->randomGenerator(); } void markGraphContextInplace(nd4j::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); } void setGraphContextCudaContext(nd4j::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); } void setGraphContextInputArray(nd4j::graph::Context* ptr, int index, void *buffer, void 
*shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextOutputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextTArguments(nd4j::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); } void setGraphContextIArguments(nd4j::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); } void setGraphContextBArguments(nd4j::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); } void deleteGraphContext(nd4j::graph::Context* ptr) { delete ptr; } nd4j::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { return new nd4j::graph::RandomGenerator(rootSeed, nodeSeed); } Nd4jLong getRandomGeneratorRootState(nd4j::graph::RandomGenerator* ptr) { return ptr->rootState(); } Nd4jLong getRandomGeneratorNodeState(nd4j::graph::RandomGenerator* ptr) { return ptr->nodeState(); } void setRandomGeneratorStates(nd4j::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); } int getRandomGeneratorRelativeInt(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); } Nd4jLong getRandomGeneratorRelativeLong(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); } void deleteRandomGenerator(nd4j::graph::RandomGenerator* ptr) { delete ptr; } Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) { try { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int shapeSize = arr.shape.size(); std::vector<Nd4jLong> shape(shapeSize); bool _empty = false; for (unsigned int i = 0; 
i < shapeSize; i++) { shape[i] = arr.shape[i]; if (arr.shape[i] == 0) _empty = true; } auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray)); Nd4jLong *shapeBuffer; if (shape.size() == 1 && shape[0] == 0) { // scalar case shapeBuffer = nd4j::ShapeBuilders::createScalarShapeInfo(dtype); } else if (_empty) { if (shapeSize > 0) shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); else shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype); } else { shapeBuffer = nd4j::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); } return reinterpret_cast<Nd4jPointer>(nd4j::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true)); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runLightBenchmarkSuit(bool printOut) { try { nd4j::LightBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runFullBenchmarkSuit(bool printOut) { try { nd4j::FullBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong 
getCachedMemory(int deviceId) { return nd4j::ConstantHelper::getInstance()->getCachedAmount(deviceId); } nd4j::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); } Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); } Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); } Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); } Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); } Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); } Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); } Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); } int lastErrorCode() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorCode(); } const char* lastErrorMessage() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorMessage(); } int binaryLevel() { return 0; } int optimalLevel() { return 0; } bool isMinimalRequirementsMet() { return true; } bool isOptimalRequirementsMet() { return true; }
48b7158346d3a430e8a7429d3dd37d368f0ea11b.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "NativeOpExecutioner.h" #include "../NativeOps.h" #include <cuda.h> #include <buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <PointersManager.h> //#include <sys/time.h> #include <curand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char 
deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = 
nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], 
extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } 
////////////////////////////////////////////////////////////////////////
// Legacy JNI-facing entry point for broadcast ops with a boolean result.
// TAD (tensor-along-dimension) shape info and offsets for both the input X
// and the output Z are pre-computed by the caller and passed in via fixed
// slots of extraPointers (see indices below). Any C++ exception is trapped
// and recorded on the default LaunchContext error reference so the Java
// side can query it — this wrapper never throws across the FFI boundary.
void execBroadcastBool(Nd4jPointer *extraPointers, int opNum,
                       void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo,
                       void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo,
                       void *hDimension, Nd4jLong *hDimensionShape,
                       void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        //Nd4jLong *tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]);
        //Nd4jLong *tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[1]);
        //Nd4jLong *tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[2]);
        //Nd4jLong *tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[3]);
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        // extraPointers slots 9..13 carry the pre-built TAD descriptors:
        // [9] host X TAD shape, [10]/[11] device X TAD shape/offsets,
        // [12]/[13] device Z TAD shape/offsets.
        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
        auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
        auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
        auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

        // LaunchContext is assembled from extraPointers: [1] stream,
        // [4]/[5] reduction/scalar scratch pointers, [3] allocation pointer.
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execBroadcastBool(&lc, opNum,
                                               hX, hXShapeInfo, dX, dXShapeInfo,
                                               hY, hYShapeInfo, dY, dYShapeInfo,
                                               hZ, hZShapeInfo, dZ, dZShapeInfo,
                                               dimension, dimensionLength,
                                               tadOnlyShapeInfo, tadOffsets,
                                               tadOnlyShapeInfoZ, tadOffsetsZ);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param dY
 * @param dYShapeInfo
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
// Same-typed broadcast op (Z type matches X/Y). Mirrors execBroadcastBool
// above; the locally-derived xType/yType/zType are computed but delegation
// happens through NativeOpExecutioner, which re-derives types itself.
void execBroadcast(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
        auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
        auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
        auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("F3 opNum:[%i]\n", opNum);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execBroadcast(&lc, opNum,
                                           hX, hXShapeInfo, dX, dXShapeInfo,
                                           hY, hYShapeInfo, dY, dYShapeInfo,
                                           hZ, hZShapeInfo, dZ, dZShapeInfo,
                                           dimension, dimensionLength,
                                           tadOnlyShapeInfo, tadOffsets,
                                           tadOnlyShapeInfoZ, tadOffsetsZ);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
////////////////////////////////////////////////////////////////////////
// Full (scalar) float-typed reduction over the whole of X into Z.
void execReduceFloat(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceFloatScalar(&lc, opNum,
                                                   hX, hXShapeInfo, dX, dXShapeInfo,
                                                   extraParams,
                                                   hZ, hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Full (scalar) same-typed reduction: Z has the same data type as X.
void execReduceSame(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceSameScalar(&lc, opNum,
                                                  hX, hXShapeInfo, dX, dXShapeInfo,
                                                  extraParams,
                                                  hZ, hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional same-typed reduction ("2" suffix = along dimensions).
// Unlike the broadcast wrappers, the TAD pack is built here on demand via
// the ConstantTadHelper cache rather than read from extraPointers.
void execReduceSame2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceSame(&lc, opNum,
                                            hX, hXShapeInfo, dX, dXShapeInfo,
                                            extraParams,
                                            hZ, hZShapeInfo, dZ, dZShapeInfo,
                                            dimension, dimensionLength,
                                            tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional long-typed reduction (Z is INT64). Same structure as
// execReduceSame2 above.
void execReduceLong2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceLong(&lc, opNum,
                                            hX, hXShapeInfo, dX, dXShapeInfo,
                                            extraParams,
                                            hZ, hZShapeInfo, dZ, dZShapeInfo,
                                            dimension, dimensionLength,
                                            tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Full (scalar) long-typed reduction. This wrapper launches the kernel
// directly via BUILD_DOUBLE_SELECTOR instead of going through
// NativeOpExecutioner, and validates that Z really is INT64.
void execReduceLong(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
        auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("LF7 opNum:[%i]\n", opNum);

        // device-side scratch buffer used by multi-block reductions
        auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        if (zType != nd4j::DataType::INT64)
            throw datatype_exception::build("execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

        auto xLength = shape::length(hXShapeInfo);
        auto blockWidth = 256;
        auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
        // dim3: (grid, block, dynamic shared memory in bytes)
        dim3 launchDims(numBlocks, blockWidth, 32768);

        // NOTE(review): the argument after dZShapeInfo here is hXShapeInfo,
        // whereas the otherwise-parallel execReduceBool below passes
        // hZShapeInfo in that slot — looks like the Z host shape was
        // intended; verify against ReduceLongFunction::execReduceScalar.
        BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
                              ::execReduceScalar(launchDims, stream, opNum,
                                                 dX, dXShapeInfo, hXShapeInfo,
                                                 extraParams,
                                                 dZ, dZShapeInfo, hXShapeInfo,
                                                 nullptr, 0,
                                                 reductionPointer, dTADShapeInfo),
                              LIBND4J_TYPES, LONG_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Dimensional boolean-typed reduction (Z is BOOL). Same structure as
// execReduceSame2/execReduceLong2 above.
void execReduceBool2(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceBool(&lc, opNum,
                                            hX, hXShapeInfo, dX, dXShapeInfo,
                                            extraParams,
                                            hZ, hZShapeInfo, dZ, dZShapeInfo,
                                            dimension, dimensionLength,
                                            tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Full (scalar) boolean-typed reduction; direct kernel launch, mirroring
// execReduceLong above but validating Z as BOOL.
void execReduceBool(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
        auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
        auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("BF7 opNum:[%i]\n", opNum);

        auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

        if (zType != nd4j::DataType::BOOL)
            throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");

        auto xLength = shape::length(hXShapeInfo);
        auto blockWidth = 256;
        auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
        dim3 launchDims(numBlocks, blockWidth, 32768);

        BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
                              ::execReduceScalar(launchDims, stream, opNum,
                                                 dX, dXShapeInfo, hXShapeInfo,
                                                 extraParams,
                                                 dZ, dZShapeInfo, hZShapeInfo,
                                                 nullptr, 0,
                                                 reductionPointer, dTADShapeInfo),
                              LIBND4J_TYPES, BOOL_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
////////////////////////////////////////////////////////////////////////
// Dimensional index-reduction (e.g. argmax-style ops): Z receives indices
// along the requested dimensions.
void execIndexReduce(Nd4jPointer *extraPointers,
                     int opNum,
                     void *hX, Nd4jLong *hXShapeInfo,
                     void *dX, Nd4jLong *dXShapeInfo,
                     void *extraParams,
                     void *hZ, Nd4jLong *hZShapeInfo,
                     void *dZ, Nd4jLong *dZShapeInfo,
                     void *hDimension, Nd4jLong *hDimensionShape,
                     void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execIndexReduce(&lc, opNum,
                                             hX, hXShapeInfo, dX, dXShapeInfo,
                                             extraParams,
                                             hZ, hZShapeInfo, dZ, dZShapeInfo,
                                             dimension, dimensionLength,
                                             tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
////////////////////////////////////////////////////////////////////////
// Dimensional float-typed reduction; same TAD-on-demand structure as the
// other *2 wrappers above.
void execReduceFloat2(Nd4jPointer *extraPointers,
                      int opNum,
                      void *hX, Nd4jLong *hXShapeInfo,
                      void *dX, Nd4jLong *dXShapeInfo,
                      void *extraParams,
                      void *hZ, Nd4jLong *hZShapeInfo,
                      void *dZ, Nd4jLong *dZShapeInfo,
                      void *hDimension, Nd4jLong *hDimensionShape,
                      void *dDimension, Nd4jLong *dDimensionShape) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduceFloat(&lc, opNum,
                                             hX, hXShapeInfo, dX, dXShapeInfo,
                                             extraParams,
                                             hZ, hZShapeInfo, dZ, dZShapeInfo,
                                             dimension, dimensionLength,
                                             tadPack.specialShapeInfo(), tadPack.specialOffsets());
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 */
////////////////////////////////////////////////////////////////////////
// Scalar index-reduction over the whole of X (e.g. global argmax).
void execIndexReduceScalar(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo){
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execIndexReduceScalar(&lc, opNum,
                                                   hX, hXShapeInfo, dX, dXShapeInfo,
                                                   extraParams,
                                                   hZ, hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Element-wise transform where Z keeps X's type.
// NOTE(review): this wrapper reads TAD shape/offsets from extraPointers
// [0]/[1], while execTransformStrict/Float below use [10]/[11], and [1]
// doubles as the stream slot for the LaunchContext — confirm the intended
// extraPointers layout with the Java caller.
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo,
                       void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformSame(&lc, opNum,
                                               hX, hXShapeInfo, dX, dXShapeInfo,
                                               hZ, hZShapeInfo, dZ, dZShapeInfo,
                                               extraParams,
                                               tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Element-wise transform producing a boolean Z. Same extraPointers[0]/[1]
// TAD sourcing as execTransformSame above.
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo,
                       void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformBool(&lc, opNum,
                                               hX, hXShapeInfo, dX, dXShapeInfo,
                                               hZ, hZShapeInfo, dZ, dZShapeInfo,
                                               extraParams,
                                               tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Type-agnostic element-wise transform (casts and the like). Builds a
// richer LaunchContext (including the device id slot [6]) and passes no
// TAD info to the executioner.
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
                      void *hX, Nd4jLong *hXShapeInfo,
                      void *dX, Nd4jLong *dXShapeInfo,
                      void *hZ, Nd4jLong *hZShapeInfo,
                      void *dZ, Nd4jLong *dZShapeInfo,
                      void *extraParams) {
    try {
        auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
        auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]);
        LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
                         reinterpret_cast<int *>(extraPointers[6]));

        NativeOpExecutioner::execTransformAny(&lc, opNum,
                                              hX, hXShapeInfo, dX, dXShapeInfo,
                                              hZ, hZShapeInfo, dZ, dZShapeInfo,
                                              extraParams,
                                              nullptr, nullptr);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Strict float element-wise transform (X and Z share one float type).
// TAD info comes from extraPointers[10]/[11].
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
                         void *hX, Nd4jLong *hXShapeInfo,
                         void *dX, Nd4jLong *dXShapeInfo,
                         void *hZ, Nd4jLong *hZShapeInfo,
                         void *dZ, Nd4jLong *dZShapeInfo,
                         void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformStrict(&lc, opNum,
                                                 hX, hXShapeInfo, dX, dXShapeInfo,
                                                 hZ, hZShapeInfo, dZ, dZShapeInfo,
                                                 extraParams,
                                                 tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Float-result element-wise transform; mirrors execTransformStrict.
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
                        void *hX, Nd4jLong *hXShapeInfo,
                        void *dX, Nd4jLong *dXShapeInfo,
                        void *hZ, Nd4jLong *hZShapeInfo,
                        void *dZ, Nd4jLong *dZShapeInfo,
                        void *extraParams) {
    try {
        auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
        auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execTransformFloat(&lc, opNum,
                                                hX, hXShapeInfo, dX, dXShapeInfo,
                                                hZ, hZShapeInfo, dZ, dZShapeInfo,
                                                extraParams,
                                                tadShapeInfo, tadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Probes whether every GPU pair can access each other's memory (P2P) and
// records the result in the global supportedP2P flag. Restores the
// originally-selected device before returning.
void checkP2P() {
    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // NOTE(review): "< 0 && > devCnt" can never both hold — this clamp is
    // dead as written; presumably "||" was intended. Same pattern appears
    // in enableP2P below.
    if (curDevice < 0 && curDevice > devCnt)
        curDevice = 0;

    bool tempSupport = true;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);

                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (!canAccess) {
                    tempSupport = false;
                    break;
                }
            }
        }

        supportedP2P = tempSupport;

        cudaSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

// Enables or disables peer access between every GPU pair that supports it.
// No-op if the requested state already matches the global allowedP2P flag.
void enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // NOTE(review): same always-false clamp as checkP2P above.
    if (curDevice < 0 && curDevice > devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);

                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (canAccess) {
                    if (enable) {
                        cudaDeviceEnablePeerAccess(dY, 0);
                    } else {
                        cudaDeviceDisablePeerAccess(dY);
                    }
                } else {
                    if (nd4j::Environment::getInstance()->isVerbose())
                        printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
                }
            }
        }

        cudaSetDevice(curDevice);
    }

    allowedP2P = enable;

    cudaSetDevice(curDevice);
}

// Returns the result of the last checkP2P() probe.
bool isP2PAvailable() {
    return supportedP2P;
}

// One-time device discovery: caches cudaDeviceProp for every GPU, bumps the
// per-thread stack limit, selects device 0, and turns on P2P if available.
void initializeDevicesAndFunctions() {
    try {
        int devCnt = 0;
        cudaGetDeviceCount(&devCnt);
        deviceProperties = new cudaDeviceProp[devCnt];
        for (int i = 0; i < devCnt; i++) {
            cudaSetDevice(i);
            cudaGetDeviceProperties(&deviceProperties[i], i);

            cudaDeviceSetLimit(cudaLimitStackSize, 4096);
        }

        cudaSetDevice(0);

        checkP2P();

        // enabling p2p gpu access if it's supported
        if (supportedP2P && devCnt > 1)
            enableP2P(allowedP2P);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Hands the host-supplied cuBLAS function-pointer table to BlasHelper.
void initializeFunctions(Nd4jPointer *functions) {
    nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
    /*
    cublasSgemv = (CublasSgemv)functions[0];
    cublasDgemv = (CublasDgemv)functions[1];
    cublasHgemm = (CublasHgemm)functions[2];
    cublasSgemm = (CublasSgemm)functions[3];
    cublasDgemm = (CublasDgemm)functions[4];
    cublasSgemmEx = (CublasSgemmEx)functions[5];
    cublasHgemmBatched = (CublasHgemmBatched)functions[6];
    cublasSgemmBatched = (CublasSgemmBatched)functions[7];
    cublasDgemmBatched = (CublasDgemmBatched)functions[8];
    */
}

/**
 * This method acquires memory chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
// Allocates pinned (page-locked) host memory; +8 bytes of slack are added
// to the request. Failures are recorded on the error reference rather than
// thrown; the returned pointer is unchecked in that case.
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
    Nd4jPointer pointer;
    // cudaHostAllocMapped |cudaHostAllocPortable
    auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault);
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed");
    }

    return reinterpret_cast<int8_t*>(pointer);
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
 * @param flags optional parameter
 */
// Allocates device memory on the CURRENT device (the deviceId argument is
// not consulted here); same +8 slack and error-reference convention as
// mallocHost.
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
    Nd4jPointer pointer;
    auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed");
    }

    return reinterpret_cast<int8_t*>(pointer);
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
int freeHost(Nd4jPointer pointer) {
    auto res = cudaFreeHost(reinterpret_cast<void *>(pointer));
    if (res != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed");
    }

    return 1L;
}

/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param ptrToDeviceId pointer to deviceId.
 */
// Frees device memory. Error code 1 is deliberately tolerated (see the
// inline note); returns 1 on success, 0 otherwise.
int freeDevice(Nd4jPointer pointer, int deviceId) {
    auto res = cudaFree(reinterpret_cast<void *>(pointer));

    // we're intentionally skipping
    if (res != 0 && res != 1) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed");
    }

    return res == 0 ? 1L : 0L;
}

// Contexts are not modeled on the CUDA backend; always returns 0.
Nd4jPointer createContext() {
    return 0L;
}

// Heap-allocates a cudaStream_t and creates the stream; the caller owns
// the returned pointer.
Nd4jPointer createStream() {
    auto stream = new cudaStream_t();
    auto dZ = cudaStreamCreate(stream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed");
    }

    return stream;
}

// Creates a timing-disabled CUDA event. Note the event handle is written
// into the pointer-sized nativeEvent slot itself (the malloc'd buffer is
// what gets returned to the caller).
Nd4jPointer createEvent() {
    Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));

    CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t));

    auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed");
    }

    return nativeEvent;
}

// Records the given event on the given stream. Returns 1 regardless;
// failures go to the error reference.
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    auto pStream = reinterpret_cast<cudaStream_t *>(stream);

    auto dZ = cudaEventRecord(*pEvent, *pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed");
    }

    return 1;
}

// Switches the calling thread's device via AffinityManager.
int setDevice(int deviceId) {
    AffinityManager::setCurrentDevice(deviceId);
    return 1;
}

// Free memory (bytes) on the current device.
Nd4jLong getDeviceFreeMemoryDefault() {
    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    return (Nd4jLong) memFree;
}

// Free memory (bytes) on a specific device; temporarily switches to it
// and restores the original device afterwards.
Nd4jLong getDeviceFreeMemory(int device) {
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jLong) memFree;
}

// Total memory (bytes) on a specific device; same switch/restore pattern
// as getDeviceFreeMemory.
Nd4jLong getDeviceTotalMemory(int device) {
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jLong) memTotal;
}

// "Sync" copy is implemented as the async variant on the stream carried in
// `reserved`; the caller is responsible for synchronizing that stream.
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

// Async memcpy on the stream passed via `reserved`. `flags` selects the
// direction: 0=H2H, 1=H2D, 2=D2H, 3=D2D; anything else records an error
// and returns 0. Returns 1 on (enqueue) success.
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto pStream = reinterpret_cast<cudaStream_t *>(reserved);

    cudaMemcpyKind kind;

    //nd4j::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");

    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
            break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
            break;
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
            break;
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
            break;
        default: {
            nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
            return 0;
        }
    }

    auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
    //auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
    if (dZ != 0) {
        printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
        fflush(stdout);
        fflush(stderr);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed");
    }

    return 1;
}
// Synchronous byte-wise memset of a device buffer.
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed");
    }

    return 1;
}

// Async byte-wise memset on the stream passed via `reserved`.
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    auto pStream = reinterpret_cast<cudaStream_t *>(reserved);

    auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed");
    }

    return 1;
}

// Destroys an event created by createEvent (handle stored in the pointer value).
int destroyEvent(Nd4jPointer event) {
    auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    auto dZ = cudaEventDestroy(*pEvent);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed");
    }

    return 1;
}

// Blocks the host until all work queued on the stream completes.
int streamSynchronize(Nd4jPointer stream) {
    auto pStream = reinterpret_cast<cudaStream_t *>(stream);

    auto dZ = cudaStreamSynchronize(*pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed");
    }

    return 1L;
}

// Blocks the host until the given event has been recorded as complete.
int eventSynchronize(Nd4jPointer event) {
    auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);

    auto dZ = cudaEventSynchronize(*pEvent);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed");
    }

    return 1L;
}

// Number of CUDA devices visible to the runtime.
int getAvailableDevices() {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    return devCnt;
}

// Toggles global debug mode.
void enableDebugMode(bool reallyEnable) {
    nd4j::Environment::getInstance()->setDebug(reallyEnable);
}

// Caps the global grid-size limit, clamped to [1, 8192].
void setGridLimit(int gridSize) {
    if (gridSize > 8192)
        gridSize = 8192;

    if (gridSize < 1)
        gridSize = 1;

    blockLimit = gridSize;
}

// OpenMP-compatible accessors; on CUDA both report the configured max.
int ompGetMaxThreads() {
    return maxThreads;
}

int ompGetNumThreads() {
    return maxThreads;
}

// Sets the global thread cap, clamped to [32, 1024].
void setOmpNumThreads(int threads) {
    if (threads > 1024)
        threads = 1024;

    if (threads < 32)
        threads = 32;

    maxThreads = threads;
}

// Toggles global verbose mode.
void enableVerboseMode(bool reallyEnable) {
    nd4j::Environment::getInstance()->setVerbose(reallyEnable);
}

// Compute-capability major/minor and device name, read from the cached
// deviceProperties table (populated elsewhere in this file).
int getDeviceMajor(int device) {
    return deviceProperties[device].major;
}

int getDeviceMinor(int device) {
    return deviceProperties[device].minor;
}

const char * getDeviceName(int device) {
    return deviceProperties[device].name;
}

// Concatenates numArrays host-side arrays along `dimension` into dZ using
// the CPU path; tadPointers/offsetPointers are unused in this variant.
void specialConcat(
        Nd4jPointer *extraPointers,
        int dimension,
        int numArrays,
        Nd4jPointer *data,
        Nd4jPointer *inputShapeInfo,
        void *dZ,
        Nd4jLong *dZShapeInfo,
        Nd4jPointer *tadPointers,
        Nd4jPointer *offsetPointers) {
    try {
        BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), nd4j::SpecialMethods,
                              ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
                              LIBND4J_TYPES);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

/**
 * Builds a heap-allocated TadPack describing the TADs of dXShapeInfo along
 * the given dimensions. The caller owns the returned pointer. Returns
 * nullptr (and records the error) on failure.
 */
nd4j::TadPack* tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength) {
    try {
        auto pack = new TadPack();
        *pack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength);
        return pack;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return nullptr;
    }
}

// Thin accessors over TadPack for the bindings layer.
Nd4jLong* getPrimaryShapeInfo(nd4j::TadPack* pack) {
    return pack->primaryShapeInfo();
}

Nd4jLong* getPrimaryOffsets(nd4j::TadPack* pack) {
    return pack->primaryOffsets();
}

Nd4jLong* getSpecialShapeInfo(nd4j::TadPack* pack) {
    return pack->specialShapeInfo();
}

Nd4jLong* getSpecialOffsets(nd4j::TadPack* pack) {
    return pack->specialOffsets();
}

Nd4jLong getNumberOfTads(nd4j::TadPack* pack) {
    return pack->numberOfTads();
}

int getShapeInfoLength(nd4j::TadPack* pack) {
    return pack->shapeInfoLength();
}

// Async copy into the device constant-memory symbol at byte offset `dst`.
// flags selects the direction (same encoding as memcpyAsync).
int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved);

    cudaMemcpyKind kind;

    DEBUG_KERNEL(pStream, -1);

    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
            break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
            break;
        // NOTE(review): case 2 has no break and falls through to case 3,
        // so flags==2 ends up as cudaMemcpyDeviceToDevice — likely a bug;
        // also no default case, so an unknown flag leaves `kind`
        // uninitialized. Compare with memcpyAsync above.
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
            break;
    }

    auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed");
    }

    return 1;
}

// Returns the device address of the constant-memory symbol.
Nd4jPointer getConstantSpace() {
    Nd4jPointer dConstAddr;
    cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
    if (dZ != 0) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed");
    }

    return dConstAddr;
}

// Gathers `n` rows (TADs) selected by `indexes` from dX into dZ on the GPU.
// Host-side mirrors (x/z) are unused here; only device buffers are touched.
void pullRows(Nd4jPointer *extraPointers,
              void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo,
              void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo,
              Nd4jLong n,
              Nd4jLong *indexes,
              Nd4jLong *tadShapeInfo,
              Nd4jLong *tadOffsets,
              Nd4jLong *zTadShapeInfo,
              Nd4jLong *zTadOffsets) {
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
        dim3 launchDims(64, 256, 1024);
        auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
        BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
                              (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets),
                              LIBND4J_TYPES);

        DEBUG_KERNEL(stream, -1);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Averages `n` arrays of `length` elements into dz/z. mode (extras[3])
// selects device (0) vs host execution; `propagate` is forwarded to the kernel.
void average(Nd4jPointer *extras,
             Nd4jPointer *x, Nd4jLong *xShapeInfo,
             Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
             void *z, Nd4jLong *zShapeInfo,
             void *dz, Nd4jLong *dzShapeInfo,
             int n,
             Nd4jLong length,
             bool propagate) {
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
        int mode = getDeviceId(extras[3]);

        auto dX = reinterpret_cast<void **>(dx);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("averageFloat called\n");

        auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
        // launching on gpu
        if (mode == 0) {
            dim3 launchDims(256, 256, 4096);
            BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric,
                                  (launchDims, stream, dX, dz, n, length, propagate),
                                  LIBND4J_TYPES);
            nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
        } else {
            // launching on host memory
            BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods,
                                  ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
                                  LIBND4J_TYPES);
        }
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Accumulates (sums) `n` arrays of `length` elements into dz/z. Same
// mode selection as average() above.
void accumulate(Nd4jPointer *extras,
                Nd4jPointer *x, Nd4jLong *xShapeInfo,
                Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
                void *z, Nd4jLong *zShapeInfo,
                void *dz, Nd4jLong *dzShapeInfo,
                int n,
                Nd4jLong length) {
    try {
        auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
        int mode = getDeviceId(extras[3]);

        auto dX = reinterpret_cast<void **>(dx);

        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("accumulateFloat called\n");

        auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
        // launching on gpu
        if (mode == 0) {
            dim3 launchDims(n, 256, 16384);
            BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric,
                                  (launchDims, stream, dX, dz, n, length),
                                  LIBND4J_TYPES);
            nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
        } else {
            // launching on host memory
            BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods,
                                  ::accumulateGeneric(x, z, zShapeInfo, n, length),
                                  LIBND4J_TYPES);
        }
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Shuffles N arrays in-place according to shuffleMap, on the GPU.
void shuffle(Nd4jPointer *extras,
             Nd4jPointer *x, Nd4jPointer *xShapeInfo,
             Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
             Nd4jPointer *z, Nd4jPointer *zShapeInfo,
             Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
             int N,
             int *shuffleMap,
             Nd4jPointer *tadShapeInfo,
             Nd4jPointer *tadOffsets) {
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);

        auto dX = reinterpret_cast<void **>(dx);
        auto dZ = reinterpret_cast<void **>(dz);
        auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
        auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
        auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
        auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);

        // data type is taken from the first array; all arrays are assumed
        // to share it
        auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
        dim3 launchDims(256, 512, 8192);
        BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
                              (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
                              LIBND4J_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// True when this build allows mixed-type (experimental) operations.
bool isExperimentalEnabled() {
    return nd4j::Environment::getInstance()->isExperimentalBuild();
}

// Sets the lower thread bound, clamped to [32, maxThreads].
void setOmpMinThreads(int threads) {
    minThreads = nd4j::math::nd4j_max<int>(32, threads);
    minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}

// Device currently bound to the calling thread.
int getDevice() {
    return nd4j::AffinityManager::currentDeviceId();
}

void setElementThreshold(int num) {
    // this is no-op for CUDA
}

void setTADThreshold(int num) {
    // this is no-op for CUDA
}

////////////////////////////////////////////////////////////////////////
// Full-array summary-statistics op (e.g. variance/stddev family).
// LaunchContext is assembled from the stream and helper buffers passed
// in extraPointers.
void execSummaryStats(Nd4jPointer *extraPointers,
                      int opNum,
                      void *hX, Nd4jLong *hXShapeInfo,
                      void *dX, Nd4jLong *dXShapeInfo,
                      void *extraParams,
                      void *hZ, Nd4jLong *hZShapeInfo,
                      void *dZ, Nd4jLong *dZShapeInfo,
                      bool biasCorrected) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ,
                                              hZShapeInfo, dZ, dZShapeInfo, biasCorrected);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what())
;
    }
}

////////////////////////////////////////////////////////////////////////
// Summary-statistics op along TADs defined by the dimension array.
void execSummaryStatsTad(Nd4jPointer *extraPointers,
                         int opNum,
                         void *hX, Nd4jLong *hXShapeInfo,
                         void *dX, Nd4jLong *dXShapeInfo,
                         void *extraParams,
                         void *hZ, Nd4jLong *hZShapeInfo,
                         void *dZ, Nd4jLong *dZShapeInfo,
                         void *hDimension, Nd4jLong *hDimensionShape,
                         void *dDimension, Nd4jLong *dDimensionShape,
                         bool biasCorrected,
                         Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ,
                                              hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadShapeInfo,
                                              tadOffsets, biasCorrected);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// Pairwise reduce3 op (e.g. distance metrics) over whole arrays.
void execReduce3(Nd4jPointer *extraPointers,
                 int opNum,
                 void *hX, Nd4jLong *hXShapeInfo,
                 void *dX, Nd4jLong *dXShapeInfo,
                 void *extraParams,
                 void *hY, Nd4jLong *hYShapeInfo,
                 void *dY, Nd4jLong *dYShapeInfo,
                 void *hZ, Nd4jLong *hZShapeInfo,
                 void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo,
                                         dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// reduce3 along TADs. When the TAD length matches either operand's full
// length, falls back to the plain (dimensional) reduce3 path.
void execReduce3Tad(Nd4jPointer *extraPointers,
                    int opNum,
                    void *hX, Nd4jLong *hXShapeInfo,
                    void *dX, Nd4jLong *dXShapeInfo,
                    void *extraParams,
                    void *hY, Nd4jLong *hYShapeInfo,
                    void *dY, Nd4jLong *dYShapeInfo,
                    void *hZ, Nd4jLong *hZShapeInfo,
                    void *dZ, Nd4jLong *dZShapeInfo,
                    void *hDimension, Nd4jLong *hDimensionShape,
                    void *dDimension, Nd4jLong *dDimensionShape,
                    Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
                    Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    try {
        auto dimension = reinterpret_cast<int *>(dDimension);
        int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

        auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
                                                                                reinterpret_cast<int *>(hDimension),
                                                                                shape::length(hDimensionShape));
        auto tadLength = shape::length(tadPack.primaryShapeInfo());
        auto yLength = shape::length(hYShapeInfo);
        auto xLength = shape::length(hXShapeInfo);

        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);

        if (tadLength == yLength || tadLength == xLength) {
            // nd4j_printf("== way\n","");
            NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY,
                                             hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo,
                                             dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets,
                                             yTadOnlyShapeInfo, yTadOffsets);
        } else
            // NOTE(review): the second TAD argument here is yTadOffsets, not
            // tadOffsets — looks like a copy/paste slip; confirm against the
            // execReduce3TAD signature before changing.
            NativeOpExecutioner::execReduce3TAD(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY,
                                                hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo,
                                                dimension, dimensionLength, tadOnlyShapeInfo, yTadOffsets,
                                                yTadOnlyShapeInfo, yTadOffsets);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

////////////////////////////////////////////////////////////////////////
// reduce3 producing a single scalar result.
void execReduce3Scalar(Nd4jPointer *extraPointers, int opNum,
                       void *hX, Nd4jLong *hXShapeInfo,
                       void *dX, Nd4jLong *dXShapeInfo,
                       void *extraParams,
                       void *hY, Nd4jLong *hYShapeInfo,
                       void *dY, Nd4jLong *dYShapeInfo,
                       void *hZ, Nd4jLong *hZShapeInfo,
                       void *dZ, Nd4jLong *dZShapeInfo) {
    try {
        LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
        NativeOpExecutioner::execReduce3Scalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY,
                                               hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}
//////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalars, hScalarShapeInfo, dScalars, dScalarShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !isExperimentalEnabled()) throw nd4j::datatype_exception::build("execScalar both 
operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { try { // not implemented yet cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go 
unsync here // cudaStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice cudaDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); cudaStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); 
cudaStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<cudaStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { 
nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed"); } void encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) 
failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Threshold-encoding stage 2: prefix-scans the stage-1 per-block data (dx) into
// offsets (dz) via prescanArrayRecursive. Note the scan reads from dx + 1 —
// element 0 of dx is presumably a header/count slot; TODO confirm against stage 1.
void encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
        //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
        prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
        nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Threshold-encoding stage 3: writes the compacted representation into dz using the
// offsets produced by stage 2; kernel dispatched on the input array's data type.
void encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

        int blockSize = 1024;
        int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);

        dim3 launchDims(numBlocks, blockSize, 4096);
        auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
        BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Decodes a threshold-compressed buffer dx (N encoded entries) back into the dense
// output dz; kernel dispatched on the output array's data type.
void decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
    try {
        cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

        // we probably want to have smaller blocks here, memory writes are misaligned anyway
        int blockSize = 128;
        int numBlocks = N / blockSize + (N % blockSize ?
1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParamsVals, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = 
nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception 
&e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? 
LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTad(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 0; } } void decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed");
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    }
}

// Memory-mapped file support is not implemented in this backend: always returns nullptr.
Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
    return nullptr;
}

// No-op counterpart of mmapFile for this backend.
void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { }

// Executes a serialized (FlatBuffers) graph. Returns the wrapped results, or nullptr on
// error (error code/message are recorded on the default LaunchContext's error reference).
nd4j::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
    try {
        return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return nullptr;
    }
}

// Thin accessors exposing ResultWrapper internals across the C ABI.
Nd4jLong getResultWrapperSize(nd4j::graph::ResultWrapper* ptr) {
    return ptr->size();
}

Nd4jPointer getResultWrapperPointer(nd4j::graph::ResultWrapper* ptr) {
    return ptr->pointer();
}

const char* getAllCustomOps() {
    return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}

// Shape-inference helper: fills a Context (constructed with id 2) from the raw argument
// arrays, wraps each input buffer/shape pair in an NDArray registered in the VariableSpace,
// then asks the op to calculate its output shapes. Empty arrays deliberately get null
// buffers (host and device) so no data is touched for them.
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
    nd4j::graph::VariableSpace varSpace;
    Context block(2, &varSpace);
    nd4j::ShapeList inShapes;

    for (int e = 0; e < numIArgs; e++)
        block.getIArguments()->push_back(iArgs[e]);

    for (int e = 0; e < numTArgs; e++)
        block.getTArguments()->push_back(tArgs[e]);

    for (int e = 0; e < numBArgs; e++)
        block.getBArguments()->push_back(bArgs[e]);

    for (int e = 0; e < numInputShapes; e++) {
        auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);

        // we shouldn't copy buffer if that's empty array
        void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
        void *bufferD_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ?
nullptr : inputBuffers[e + numInputShapes]; auto array = new nd4j::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, 
inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(nd4j::ShapeList* list) { return list->size(); } Nd4jLong* getShape(nd4j::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs]; inputs[e] = new nd4j::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. 
bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return 1;
    }
}

// Executes a custom op against a caller-prepared Context (fastpath arrays already attached).
// The context's CUDA stream is synchronized afterwards so asynchronous kernel failures
// surface here as an exception; every non-empty fastpath input/output then gets
// syncToDevice() called on it. Returns the op's status, or 1 on any exception
// (error details recorded on the default LaunchContext's error reference).
int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
    try {
        auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash);
        auto context = reinterpret_cast<Context *>(opContext);

        auto result = op->execute(context);

        // block until the op's kernels finish; non-zero means an async CUDA error
        auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream());
        if (res != 0)
            throw nd4j::cuda_exception::build("customOp execution failed", res);

        for (auto v:context->fastpath_in()) {
            if (!v->isEmpty())
                v->syncToDevice();
        }

        for (auto v:context->fastpath_out()) {
            if (!v->isEmpty())
                v->syncToDevice();
        }

        return result;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return 1;
    }
}

// Deserializes a FlatBuffers graph and registers it under graphId in the global GraphHolder.
int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
    try {
        auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);

        nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);

        return ND4J_STATUS_OK;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return 1;
    }
}

// Runs a previously registered graph against a cloned VariableSpace: each provided input
// buffer replaces (or creates) the variable at the matching inputIndices entry before
// execution, so the stored graph itself is never mutated.
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
    auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId);
    auto varSpace = graph->getVariableSpace()->clone();

    std::vector<nd4j::NDArray*> handles;

    for (int e = 0; e < numInputs; e++) {
        auto idx = inputIndices[e];

        // we'll delete this array later, together with cloned VariableSpace
        auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong
*>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(nd4j::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(nd4j::graph::VariablesSet* set) { return set->status(); } nd4j::graph::Variable* getVariable(nd4j::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(nd4j::graph::Variable* variable) { return variable->id(); } int getVariableIndex(nd4j::graph::Variable* variable) { return variable->index(); } const char* getVariableName(nd4j::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong* getVariableShape(nd4j::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(nd4j::graph::Variable* 
variable) {
    return variable->getNDArray()->buffer();
}

// Drops the graph registered under graphId from the global GraphHolder.
int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
    try {
        nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId);

        return ND4J_STATUS_OK;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return 1;
    }
}

// The delete* helpers below free arrays handed across the C ABI; each one casts back to
// the element type it was presumably allocated with (new[]) before delete[]-ing it.
void deletePointerArray(Nd4jPointer pointer) {
    Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
    delete[] ptr;
}

void deleteCharArray(Nd4jPointer pointer) {
    auto ptr = reinterpret_cast<char *>(pointer);
    delete[] ptr;
}

void deleteIntArray(Nd4jPointer pointer) {
    auto ptr = reinterpret_cast<int *>(pointer);
    delete[] ptr;
}

void deleteLongArray(Nd4jPointer pointer) {
    auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
    delete[] ptr;
}

void deleteVariablesSet(nd4j::graph::VariablesSet* pointer) {
    delete pointer;
}

void deleteShapeList(Nd4jPointer shapeList) {
    nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList);

    //list->destroy();
    delete list;
}

const char* getAllOperations() {
    return nd4j::OpTracker::getInstance()->exportOperations();
}

// Allocates a new GraphState for the given id; ownership passes to the caller,
// who must release it via deleteGraphState().
Nd4jPointer getGraphState(Nd4jLong id) {
    return (Nd4jPointer) new nd4j::graph::GraphState(id);
}

void deleteGraphState(Nd4jPointer state) {
    auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state);
    delete stateP;
}

Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
    /**
     * That's basically exec, with VariableSpace provided in GraphState:
     * depending on operation (i.e.
while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, 
inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, 
dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { 
nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, 
dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else 
if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_buffer; } void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong 
*yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, int* hIindexes, int* dIndexes) { try { auto stream = reinterpret_cast<cudaStream_t 
*>(extraPointers[1]); nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIndexes), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<nd4j::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); nd4j::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { cudaStream_t stream; cudaStreamCreate(&stream); tryPointerKernel << < 256, 512, len + 64, stream >> > (p, len); auto e = cudaStreamSynchronize(stream); if (e != 0) throw nd4j::cuda_exception::build("tryPointer failed", e); cudaStreamDestroy(stream); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int dataTypeFromNpyHeader(void *header) { return 
(int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } nd4j::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, nd4j::DataType dtype, char order, Nd4jLong ews, bool empty) { try { auto buffer = new ConstantDataBuffer(); *buffer = nd4j::ConstantShapeHelper::getInstance()->bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty)); return buffer; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteShapeBuffer(nd4j::ConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(nd4j::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion, Environment::getInstance()->_blasPatchVersion, major, minor, build); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(152); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch"); } return result; } nd4j::ConstantDataBuffer* constantBufferLong(nd4j::DataType dtype, Nd4jLong *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBufferDouble(nd4j::DataType dtype, double *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBuffer(nd4j::DataType dtype, nd4j::ConstantDescriptor *descriptor) { return nd4j::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype); } Nd4jPointer getConstantDataBufferPrimary(nd4j::ConstantDataBuffer* dbf) { return dbf->primary(); } Nd4jPointer getConstantDataBufferSpecial(nd4j::ConstantDataBuffer* dbf) { return dbf->special(); } Nd4jLong getConstantDataBufferLength(nd4j::ConstantDataBuffer* dbf) { return dbf->length(); } Nd4jLong getConstantDataBufferSizeOf(nd4j::ConstantDataBuffer* dbf) { return dbf->sizeOf(); } nd4j::graph::Context* createGraphContext(int nodeId) { return new nd4j::graph::Context(nodeId); } nd4j::graph::RandomGenerator* getGraphContextRandomGenerator(nd4j::graph::Context* ptr) { return &ptr->randomGenerator(); } void markGraphContextInplace(nd4j::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); } void setGraphContextCudaContext(nd4j::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); } void setGraphContextInputArray(nd4j::graph::Context* ptr, int index, void *buffer, void 
*shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextOutputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextTArguments(nd4j::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); } void setGraphContextIArguments(nd4j::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); } void setGraphContextBArguments(nd4j::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); } void deleteGraphContext(nd4j::graph::Context* ptr) { delete ptr; } nd4j::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { return new nd4j::graph::RandomGenerator(rootSeed, nodeSeed); } Nd4jLong getRandomGeneratorRootState(nd4j::graph::RandomGenerator* ptr) { return ptr->rootState(); } Nd4jLong getRandomGeneratorNodeState(nd4j::graph::RandomGenerator* ptr) { return ptr->nodeState(); } void setRandomGeneratorStates(nd4j::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); } int getRandomGeneratorRelativeInt(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); } Nd4jLong getRandomGeneratorRelativeLong(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); } void deleteRandomGenerator(nd4j::graph::RandomGenerator* ptr) { delete ptr; } Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) { try { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int shapeSize = arr.shape.size(); std::vector<Nd4jLong> shape(shapeSize); bool _empty = false; for (unsigned int i = 0; 
i < shapeSize; i++) { shape[i] = arr.shape[i]; if (arr.shape[i] == 0) _empty = true; } auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray)); Nd4jLong *shapeBuffer; if (shape.size() == 1 && shape[0] == 0) { // scalar case shapeBuffer = nd4j::ShapeBuilders::createScalarShapeInfo(dtype); } else if (_empty) { if (shapeSize > 0) shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); else shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype); } else { shapeBuffer = nd4j::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); } return reinterpret_cast<Nd4jPointer>(nd4j::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true)); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runLightBenchmarkSuit(bool printOut) { try { nd4j::LightBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runFullBenchmarkSuit(bool printOut) { try { nd4j::FullBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong 
getCachedMemory(int deviceId) { return nd4j::ConstantHelper::getInstance()->getCachedAmount(deviceId); } nd4j::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); } Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); } Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); } Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); } Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); } Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); } Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); } Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); } int lastErrorCode() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorCode(); } const char* lastErrorMessage() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorMessage(); } int binaryLevel() { return 0; } int optimalLevel() { return 0; } bool isMinimalRequirementsMet() { return true; } bool isOptimalRequirementsMet() { return true; }
4c9c681178aa4670a0d33bfb67e9196c9435bfc9.hip
// !!! This is a file automatically generated by hipify!!! #define THRUST_ENABLE_FUTURE_RAW_DATA_MEMBER #include <thrust/detail/config.h> #if THRUST_CPP_DIALECT >= 2011 && !defined(THRUST_LEGACY_GCC) #include <unittest/unittest.h> #include <unittest/util_async.h> #include <thrust/async/reduce.h> #include <thrust/async/copy.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> template <typename T> struct custom_plus { __host__ __device__ T operator()(T lhs, T rhs) const { return lhs + rhs; } }; #define DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( \ NAME, MEMBERS, CTOR, DTOR, VALIDATE, ... \ ) \ template <typename T> \ struct NAME \ { \ MEMBERS \ \ NAME() { CTOR } \ \ ~NAME() { DTOR } \ \ template <typename Event> \ void validate_event(Event& e) \ { \ THRUST_UNUSED_VAR(e); \ VALIDATE \ } \ \ template < \ typename ForwardIt, typename Sentinel \ > \ __host__ \ auto operator()( \ ForwardIt&& first, Sentinel&& last \ ) \ THRUST_DECLTYPE_RETURNS( \ ::thrust::async::reduce( \ __VA_ARGS__ \ ) \ ) \ }; \ /**/ #define DEFINE_ASYNC_REDUCE_INVOKER(NAME, ...) \ DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( \ NAME \ , THRUST_PP_EMPTY(), THRUST_PP_EMPTY(), THRUST_PP_EMPTY(), THRUST_PP_EMPTY()\ , __VA_ARGS__ \ ) \ /**/ #define DEFINE_SYNC_REDUCE_INVOKER(NAME, ...) \ template <typename T> \ struct NAME \ { \ \ template < \ typename ForwardIt, typename Sentinel \ > \ __host__ \ auto operator()( \ ForwardIt&& first, Sentinel&& last \ ) \ THRUST_DECLTYPE_RETURNS( \ ::thrust::reduce( \ __VA_ARGS__ \ ) \ ) \ }; \ /**/ DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device , thrust::device , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on // Members. 
, hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init_plus , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init_plus , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init_plus // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init_plus // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init_custom_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init_custom_plus , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init_custom_plus , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init_custom_plus // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init_custom_plus // Members. , hipStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream_, hipStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init_custom_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); /////////////////////////////////////////////////////////////////////////////// template < template <typename> class AsyncReduceInvoker , template <typename> class SyncReduceInvoker > struct test_async_reduce { template <typename T> struct tester { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(h0); thrust::device_vector<T> d0b(h0); thrust::device_vector<T> d0c(h0); thrust::device_vector<T> d0d(h0); AsyncReduceInvoker<T> invoke_async; SyncReduceInvoker<T> invoke_sync; ASSERT_EQUAL(h0, d0a); ASSERT_EQUAL(h0, d0b); ASSERT_EQUAL(h0, d0c); ASSERT_EQUAL(h0, d0d); auto f0a = invoke_async(d0a.begin(), d0a.end()); auto f0b = invoke_async(d0b.begin(), d0b.end()); auto f0c = invoke_async(d0c.begin(), d0c.end()); auto f0d = invoke_async(d0d.begin(), d0d.end()); invoke_async.validate_event(f0a); invoke_async.validate_event(f0b); invoke_async.validate_event(f0c); invoke_async.validate_event(f0d); // This potentially runs concurrently with the copies. 
auto const r0 = invoke_sync(h0.begin(), h0.end()); auto const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); auto const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); auto const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f0c); auto const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f0d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_allocator ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_on ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init , reduce_sync_invoker_init >::tester ) , 
NumericTypes , test_async_reduce_policy_allocator_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_on_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_on_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_init_custom_plus ); 
DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_on_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init_custom_plus ); /////////////////////////////////////////////////////////////////////////////// template < template <typename> class AsyncReduceInvoker , template <typename> class SyncReduceInvoker > struct test_async_reduce_counting_iterator { template <typename T> struct tester { __host__ void operator()() { constexpr std::size_t n = 15 * sizeof(T); ASSERT_LEQUAL(T(n), unittest::truncate_to_max_representable<T>(n)); thrust::counting_iterator<T> first(0); thrust::counting_iterator<T> last(n); AsyncReduceInvoker<T> invoke_async; SyncReduceInvoker<T> invoke_sync; auto f0a = invoke_async(first, last); auto f0b = invoke_async(first, last); auto f0c = invoke_async(first, last); auto f0d = invoke_async(first, last); invoke_async.validate_event(f0a); invoke_async.validate_event(f0b); invoke_async.validate_event(f0c); invoke_async.validate_event(f0d); // This potentially runs concurrently with the copies. 
auto const r0 = invoke_sync(first, last); auto const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); auto const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); auto const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f0c); auto const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f0d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; }; DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker , reduce_sync_invoker >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device , reduce_sync_invoker >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init , reduce_sync_invoker_init >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init , reduce_sync_invoker_init >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init_plus , reduce_sync_invoker_init_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init_plus , reduce_sync_invoker_init_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init_custom_plus , 
reduce_sync_invoker_init_custom_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init_custom_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init_custom_plus ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_using { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(h0); thrust::device_vector<T> d0b(h0); ASSERT_EQUAL(h0, d0a); ASSERT_EQUAL(h0, d0b); thrust::device_future<T> f0a; thrust::device_future<T> f0b; // When you import the customization points into the global namespace, // they should be selected instead of the synchronous algorithms. { using namespace thrust::async; f0a = reduce(d0a.begin(), d0a.end()); } { using thrust::async::reduce; f0b = reduce(d0b.begin(), d0b.end()); } // ADL should find the synchronous algorithms. // This potentially runs concurrently with the copies. 
T const r0 = reduce(h0.begin(), h0.end()); T const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); T const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_using , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); auto f0 = thrust::async::reduce( d0.begin(), d0.end() ); ASSERT_EQUAL(true, f0.valid_stream()); auto const f0_stream = f0.stream().native_handle(); auto f1 = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); // Verify that double consumption of a future produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(f0_stream, f1.stream().native_handle()); auto after_policy2 = thrust::device.after(f1); auto f2 = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); // Verify that double consumption of a policy produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(f0_stream, f2.stream().native_handle()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_on_then_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); hipStream_t stream; thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) ); auto f0 = thrust::async::reduce( thrust::device.on(stream), d0.begin(), d0.end() ); ASSERT_EQUAL_QUIET(stream, f0.stream().native_handle()); auto f1 = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); // Verify that double consumption of a future produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream, f1.stream().native_handle()); auto after_policy2 = thrust::device.after(f1); auto f2 = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); // Verify that double consumption of a policy produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream, f2.stream().native_handle()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); thrust::cuda_cub::throw_on_error( hipStreamDestroy(stream) ); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_on_then_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_allocator_on_then_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); hipStream_t stream0; thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream0, hipStreamNonBlocking) ); hipStream_t stream1; thrust::cuda_cub::throw_on_error( hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking) ); auto f0 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream0) , d0.begin(), d0.end() ); ASSERT_EQUAL_QUIET(stream0, f0.stream().native_handle()); auto f1 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).after(f0) , d0.begin(), d0.end() ); ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).after(f0) , d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream0, f1.stream().native_handle()); auto f2 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream1).after(f1) , d0.begin(), d0.end() ); ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream1).after(f1) , d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); KNOWN_FAILURE; // FIXME: The below fails because you can't combine allocator attachment, // `.on`, and `.after`. 
ASSERT_EQUAL_QUIET(stream1, f2.stream().native_handle()); // This potentially runs concurrently with the copies. T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); thrust::cuda_cub::throw_on_error(hipStreamDestroy(stream0)); thrust::cuda_cub::throw_on_error(hipStreamDestroy(stream1)); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_allocator_on_then_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_caching { __host__ void operator()(std::size_t n) { constexpr std::int64_t m = 32; thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); T const* f0_raw_data; { // Perform one reduction to ensure there's an entry in the caching // allocator. auto f0 = thrust::async::reduce(d0.begin(), d0.end()); TEST_EVENT_WAIT(f0); f0_raw_data = f0.raw_data(); } for (std::int64_t i = 0; i < m; ++i) { auto f1 = thrust::async::reduce(d0.begin(), d0.end()); ASSERT_EQUAL(true, f1.valid_stream()); ASSERT_EQUAL(true, f1.valid_content()); ASSERT_EQUAL_QUIET(f0_raw_data, f1.raw_data()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f1); ASSERT_EQUAL(r0, r1); } } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_caching , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_copy_then_reduce { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0a(unittest::random_integers<T>(n)); thrust::host_vector<T> h0b(unittest::random_integers<T>(n)); thrust::host_vector<T> h0c(unittest::random_integers<T>(n)); thrust::host_vector<T> h0d(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(n); thrust::device_vector<T> d0b(n); thrust::device_vector<T> d0c(n); thrust::device_vector<T> d0d(n); auto f0a = thrust::async::copy(h0a.begin(), h0a.end(), d0a.begin()); auto f0b = thrust::async::copy(h0b.begin(), h0b.end(), d0b.begin()); auto f0c = thrust::async::copy(h0c.begin(), h0c.end(), d0c.begin()); auto f0d = thrust::async::copy(h0d.begin(), h0d.end(), d0d.begin()); ASSERT_EQUAL(true, f0a.valid_stream()); ASSERT_EQUAL(true, f0b.valid_stream()); ASSERT_EQUAL(true, f0c.valid_stream()); ASSERT_EQUAL(true, f0d.valid_stream()); auto const f0a_stream = f0a.stream().native_handle(); auto const f0b_stream = f0b.stream().native_handle(); auto const f0c_stream = f0c.stream().native_handle(); auto const f0d_stream = f0d.stream().native_handle(); auto f1a = thrust::async::reduce( thrust::device.after(f0a), d0a.begin(), d0a.end() ); auto f1b = thrust::async::reduce( thrust::device.after(f0b), d0b.begin(), d0b.end() ); auto f1c = thrust::async::reduce( thrust::device.after(f0c), d0c.begin(), d0c.end() ); auto f1d = thrust::async::reduce( thrust::device.after(f0d), d0d.begin(), d0d.end() ); ASSERT_EQUAL(false, f0a.valid_stream()); ASSERT_EQUAL(false, f0b.valid_stream()); ASSERT_EQUAL(false, f0c.valid_stream()); ASSERT_EQUAL(false, f0d.valid_stream()); ASSERT_EQUAL(true, f1a.valid_stream()); ASSERT_EQUAL(true, 
f1a.valid_content()); ASSERT_EQUAL(true, f1b.valid_stream()); ASSERT_EQUAL(true, f1b.valid_content()); ASSERT_EQUAL(true, f1c.valid_stream()); ASSERT_EQUAL(true, f1c.valid_content()); ASSERT_EQUAL(true, f1d.valid_stream()); ASSERT_EQUAL(true, f1d.valid_content()); // Verify that streams were stolen. ASSERT_EQUAL_QUIET(f0a_stream, f1a.stream().native_handle()); ASSERT_EQUAL_QUIET(f0b_stream, f1b.stream().native_handle()); ASSERT_EQUAL_QUIET(f0c_stream, f1c.stream().native_handle()); ASSERT_EQUAL_QUIET(f0d_stream, f1d.stream().native_handle()); // This potentially runs concurrently with the copies. T const r0 = thrust::reduce(h0a.begin(), h0a.end()); T const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f1a); T const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f1b); T const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f1c); T const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f1d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_copy_then_reduce , BuiltinNumericTypes ); /////////////////////////////////////////////////////////////////////////////// // TODO: when_all from reductions. #endif
4c9c681178aa4670a0d33bfb67e9196c9435bfc9.cu
#define THRUST_ENABLE_FUTURE_RAW_DATA_MEMBER #include <thrust/detail/config.h> #if THRUST_CPP_DIALECT >= 2011 && !defined(THRUST_LEGACY_GCC) #include <unittest/unittest.h> #include <unittest/util_async.h> #include <thrust/async/reduce.h> #include <thrust/async/copy.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> template <typename T> struct custom_plus { __host__ __device__ T operator()(T lhs, T rhs) const { return lhs + rhs; } }; #define DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( \ NAME, MEMBERS, CTOR, DTOR, VALIDATE, ... \ ) \ template <typename T> \ struct NAME \ { \ MEMBERS \ \ NAME() { CTOR } \ \ ~NAME() { DTOR } \ \ template <typename Event> \ void validate_event(Event& e) \ { \ THRUST_UNUSED_VAR(e); \ VALIDATE \ } \ \ template < \ typename ForwardIt, typename Sentinel \ > \ __host__ \ auto operator()( \ ForwardIt&& first, Sentinel&& last \ ) \ THRUST_DECLTYPE_RETURNS( \ ::thrust::async::reduce( \ __VA_ARGS__ \ ) \ ) \ }; \ /**/ #define DEFINE_ASYNC_REDUCE_INVOKER(NAME, ...) \ DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( \ NAME \ , THRUST_PP_EMPTY(), THRUST_PP_EMPTY(), THRUST_PP_EMPTY(), THRUST_PP_EMPTY()\ , __VA_ARGS__ \ ) \ /**/ #define DEFINE_SYNC_REDUCE_INVOKER(NAME, ...) \ template <typename T> \ struct NAME \ { \ \ template < \ typename ForwardIt, typename Sentinel \ > \ __host__ \ auto operator()( \ ForwardIt&& first, Sentinel&& last \ ) \ THRUST_DECLTYPE_RETURNS( \ ::thrust::reduce( \ __VA_ARGS__ \ ) \ ) \ }; \ /**/ DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device , thrust::device , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on // Members. , cudaStream_t stream_; // Constructor. 
, thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker , THRUST_FWD(first), THRUST_FWD(last) ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init_plus , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init_plus , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init_plus // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init_plus // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , thrust::plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_init_custom_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_init_custom_plus , thrust::device , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_init_custom_plus , thrust::device(thrust::device_allocator<void>{}) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_on_init_custom_plus // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. 
, thrust::device.on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_STATEFUL_ASYNC_REDUCE_INVOKER( reduce_async_invoker_device_allocator_on_init_custom_plus // Members. , cudaStream_t stream_; // Constructor. , thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking) ); // Destructor. , thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream_) ); // `validate_event` member. , ASSERT_EQUAL_QUIET(stream_, e.stream().native_handle()); // Arguments to `thrust::async::reduce`. , thrust::device(thrust::device_allocator<void>{}).on(stream_) , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); DEFINE_SYNC_REDUCE_INVOKER( reduce_sync_invoker_init_custom_plus , THRUST_FWD(first), THRUST_FWD(last) , unittest::random_integer<T>() , custom_plus<T>() ); /////////////////////////////////////////////////////////////////////////////// template < template <typename> class AsyncReduceInvoker , template <typename> class SyncReduceInvoker > struct test_async_reduce { template <typename T> struct tester { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(h0); thrust::device_vector<T> d0b(h0); thrust::device_vector<T> d0c(h0); thrust::device_vector<T> d0d(h0); AsyncReduceInvoker<T> invoke_async; SyncReduceInvoker<T> invoke_sync; ASSERT_EQUAL(h0, d0a); ASSERT_EQUAL(h0, d0b); ASSERT_EQUAL(h0, d0c); ASSERT_EQUAL(h0, d0d); auto f0a = invoke_async(d0a.begin(), d0a.end()); auto f0b = invoke_async(d0b.begin(), d0b.end()); auto f0c = invoke_async(d0c.begin(), d0c.end()); auto f0d = invoke_async(d0d.begin(), d0d.end()); invoke_async.validate_event(f0a); invoke_async.validate_event(f0b); invoke_async.validate_event(f0c); invoke_async.validate_event(f0d); // This potentially runs concurrently with the copies. 
auto const r0 = invoke_sync(h0.begin(), h0.end()); auto const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); auto const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); auto const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f0c); auto const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f0d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_allocator ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_on ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on , reduce_sync_invoker >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init , reduce_sync_invoker_init >::tester ) , 
NumericTypes , test_async_reduce_policy_allocator_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_on_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init , reduce_sync_invoker_init >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_on_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init_plus , reduce_sync_invoker_init_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_init_custom_plus ); 
DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_on_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_on_init_custom_plus ); DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce< reduce_async_invoker_device_allocator_on_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , NumericTypes , test_async_reduce_policy_allocator_on_init_custom_plus ); /////////////////////////////////////////////////////////////////////////////// template < template <typename> class AsyncReduceInvoker , template <typename> class SyncReduceInvoker > struct test_async_reduce_counting_iterator { template <typename T> struct tester { __host__ void operator()() { constexpr std::size_t n = 15 * sizeof(T); ASSERT_LEQUAL(T(n), unittest::truncate_to_max_representable<T>(n)); thrust::counting_iterator<T> first(0); thrust::counting_iterator<T> last(n); AsyncReduceInvoker<T> invoke_async; SyncReduceInvoker<T> invoke_sync; auto f0a = invoke_async(first, last); auto f0b = invoke_async(first, last); auto f0c = invoke_async(first, last); auto f0d = invoke_async(first, last); invoke_async.validate_event(f0a); invoke_async.validate_event(f0b); invoke_async.validate_event(f0c); invoke_async.validate_event(f0d); // This potentially runs concurrently with the copies. 
auto const r0 = invoke_sync(first, last); auto const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); auto const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); auto const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f0c); auto const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f0d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; }; DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker , reduce_sync_invoker >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device , reduce_sync_invoker >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init , reduce_sync_invoker_init >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init , reduce_sync_invoker_init >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init_plus , reduce_sync_invoker_init_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init_plus , reduce_sync_invoker_init_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_init_custom_plus , 
reduce_sync_invoker_init_custom_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_counting_iterator_init_custom_plus ); DECLARE_GENERIC_UNITTEST_WITH_TYPES_AND_NAME( THRUST_PP_EXPAND_ARGS( test_async_reduce_counting_iterator< reduce_async_invoker_device_init_custom_plus , reduce_sync_invoker_init_custom_plus >::tester ) , BuiltinNumericTypes , test_async_reduce_policy_counting_iterator_init_custom_plus ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_using { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(h0); thrust::device_vector<T> d0b(h0); ASSERT_EQUAL(h0, d0a); ASSERT_EQUAL(h0, d0b); thrust::device_future<T> f0a; thrust::device_future<T> f0b; // When you import the customization points into the global namespace, // they should be selected instead of the synchronous algorithms. { using namespace thrust::async; f0a = reduce(d0a.begin(), d0a.end()); } { using thrust::async::reduce; f0b = reduce(d0b.begin(), d0b.end()); } // ADL should find the synchronous algorithms. // This potentially runs concurrently with the copies. 
T const r0 = reduce(h0.begin(), h0.end()); T const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f0a); T const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f0b); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_using , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); auto f0 = thrust::async::reduce( d0.begin(), d0.end() ); ASSERT_EQUAL(true, f0.valid_stream()); auto const f0_stream = f0.stream().native_handle(); auto f1 = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); // Verify that double consumption of a future produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(f0_stream, f1.stream().native_handle()); auto after_policy2 = thrust::device.after(f1); auto f2 = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); // Verify that double consumption of a policy produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(f0_stream, f2.stream().native_handle()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_on_then_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); cudaStream_t stream; thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) ); auto f0 = thrust::async::reduce( thrust::device.on(stream), d0.begin(), d0.end() ); ASSERT_EQUAL_QUIET(stream, f0.stream().native_handle()); auto f1 = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); // Verify that double consumption of a future produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device.after(f0), d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream, f1.stream().native_handle()); auto after_policy2 = thrust::device.after(f1); auto f2 = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); // Verify that double consumption of a policy produces an exception. ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( after_policy2, d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream, f2.stream().native_handle()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); thrust::cuda_cub::throw_on_error( cudaStreamDestroy(stream) ); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_on_then_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_allocator_on_then_after { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); cudaStream_t stream0; thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream0, cudaStreamNonBlocking) ); cudaStream_t stream1; thrust::cuda_cub::throw_on_error( cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking) ); auto f0 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream0) , d0.begin(), d0.end() ); ASSERT_EQUAL_QUIET(stream0, f0.stream().native_handle()); auto f1 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).after(f0) , d0.begin(), d0.end() ); ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).after(f0) , d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); ASSERT_EQUAL_QUIET(stream0, f1.stream().native_handle()); auto f2 = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream1).after(f1) , d0.begin(), d0.end() ); ASSERT_THROWS_EQUAL( auto x = thrust::async::reduce( thrust::device(thrust::device_allocator<void>{}).on(stream1).after(f1) , d0.begin(), d0.end() ); THRUST_UNUSED_VAR(x) , thrust::event_error , thrust::event_error(thrust::event_errc::no_state) ); KNOWN_FAILURE; // FIXME: The below fails because you can't combine allocator attachment, // `.on`, and `.after`. 
ASSERT_EQUAL_QUIET(stream1, f2.stream().native_handle()); // This potentially runs concurrently with the copies. T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f2); ASSERT_EQUAL(r0, r1); thrust::cuda_cub::throw_on_error(cudaStreamDestroy(stream0)); thrust::cuda_cub::throw_on_error(cudaStreamDestroy(stream1)); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_allocator_on_then_after , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_reduce_caching { __host__ void operator()(std::size_t n) { constexpr std::int64_t m = 32; thrust::host_vector<T> h0(unittest::random_integers<T>(n)); thrust::device_vector<T> d0(h0); ASSERT_EQUAL(h0, d0); T const* f0_raw_data; { // Perform one reduction to ensure there's an entry in the caching // allocator. auto f0 = thrust::async::reduce(d0.begin(), d0.end()); TEST_EVENT_WAIT(f0); f0_raw_data = f0.raw_data(); } for (std::int64_t i = 0; i < m; ++i) { auto f1 = thrust::async::reduce(d0.begin(), d0.end()); ASSERT_EQUAL(true, f1.valid_stream()); ASSERT_EQUAL(true, f1.valid_content()); ASSERT_EQUAL_QUIET(f0_raw_data, f1.raw_data()); // This potentially runs concurrently with the copies. 
T const r0 = thrust::reduce(h0.begin(), h0.end()); T const r1 = TEST_FUTURE_VALUE_RETRIEVAL(f1); ASSERT_EQUAL(r0, r1); } } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_reduce_caching , NumericTypes ); /////////////////////////////////////////////////////////////////////////////// template <typename T> struct test_async_copy_then_reduce { __host__ void operator()(std::size_t n) { thrust::host_vector<T> h0a(unittest::random_integers<T>(n)); thrust::host_vector<T> h0b(unittest::random_integers<T>(n)); thrust::host_vector<T> h0c(unittest::random_integers<T>(n)); thrust::host_vector<T> h0d(unittest::random_integers<T>(n)); thrust::device_vector<T> d0a(n); thrust::device_vector<T> d0b(n); thrust::device_vector<T> d0c(n); thrust::device_vector<T> d0d(n); auto f0a = thrust::async::copy(h0a.begin(), h0a.end(), d0a.begin()); auto f0b = thrust::async::copy(h0b.begin(), h0b.end(), d0b.begin()); auto f0c = thrust::async::copy(h0c.begin(), h0c.end(), d0c.begin()); auto f0d = thrust::async::copy(h0d.begin(), h0d.end(), d0d.begin()); ASSERT_EQUAL(true, f0a.valid_stream()); ASSERT_EQUAL(true, f0b.valid_stream()); ASSERT_EQUAL(true, f0c.valid_stream()); ASSERT_EQUAL(true, f0d.valid_stream()); auto const f0a_stream = f0a.stream().native_handle(); auto const f0b_stream = f0b.stream().native_handle(); auto const f0c_stream = f0c.stream().native_handle(); auto const f0d_stream = f0d.stream().native_handle(); auto f1a = thrust::async::reduce( thrust::device.after(f0a), d0a.begin(), d0a.end() ); auto f1b = thrust::async::reduce( thrust::device.after(f0b), d0b.begin(), d0b.end() ); auto f1c = thrust::async::reduce( thrust::device.after(f0c), d0c.begin(), d0c.end() ); auto f1d = thrust::async::reduce( thrust::device.after(f0d), d0d.begin(), d0d.end() ); ASSERT_EQUAL(false, f0a.valid_stream()); ASSERT_EQUAL(false, f0b.valid_stream()); ASSERT_EQUAL(false, f0c.valid_stream()); ASSERT_EQUAL(false, f0d.valid_stream()); ASSERT_EQUAL(true, f1a.valid_stream()); ASSERT_EQUAL(true, 
f1a.valid_content()); ASSERT_EQUAL(true, f1b.valid_stream()); ASSERT_EQUAL(true, f1b.valid_content()); ASSERT_EQUAL(true, f1c.valid_stream()); ASSERT_EQUAL(true, f1c.valid_content()); ASSERT_EQUAL(true, f1d.valid_stream()); ASSERT_EQUAL(true, f1d.valid_content()); // Verify that streams were stolen. ASSERT_EQUAL_QUIET(f0a_stream, f1a.stream().native_handle()); ASSERT_EQUAL_QUIET(f0b_stream, f1b.stream().native_handle()); ASSERT_EQUAL_QUIET(f0c_stream, f1c.stream().native_handle()); ASSERT_EQUAL_QUIET(f0d_stream, f1d.stream().native_handle()); // This potentially runs concurrently with the copies. T const r0 = thrust::reduce(h0a.begin(), h0a.end()); T const r1a = TEST_FUTURE_VALUE_RETRIEVAL(f1a); T const r1b = TEST_FUTURE_VALUE_RETRIEVAL(f1b); T const r1c = TEST_FUTURE_VALUE_RETRIEVAL(f1c); T const r1d = TEST_FUTURE_VALUE_RETRIEVAL(f1d); ASSERT_EQUAL(r0, r1a); ASSERT_EQUAL(r0, r1b); ASSERT_EQUAL(r0, r1c); ASSERT_EQUAL(r0, r1d); } }; DECLARE_GENERIC_SIZED_UNITTEST_WITH_TYPES( test_async_copy_then_reduce , BuiltinNumericTypes ); /////////////////////////////////////////////////////////////////////////////// // TODO: when_all from reductions. #endif
eca988eecb265106297118bac5da3d243719aac9.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates several CUTLASS utilities in the context of a mixed-precision floating-point matrix product computation. 
These utilities are intended to be useful supporting components for managing tensor and matrix memory allocations, initializing and comparing results, and computing reference output. CUTLASS utilities are defined in the directory `tools/util`, and definitions appear namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have both host-side and device-side implementations, and the choice to use device-side initialization and host-side verification in this example was arbitrary. cutlass::half_t This is a numeric type implementing IEEE half-precision quantities. It is functional in host and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables harware-accelerated numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available hardware is used to implement conversion and numeric operations. cutlass::HostTensor<> This template class simplifies the creation of tensors for all supported layouts. It simplifies allocation and management of host- and device- memory allocations. This class offers methods device_view() and host_view() to provide TensorView objects for device- and host-side memory allocations. cutlass::reference::device::TensorFillRandomGaussian() This template function initializes elementsof a tensor to a random Gaussian distribution. It uses cuRAND in device code to compute random numbers. cutlass::reference::host::Gemm<> This template function computes the general matrix product. This template supports unique data types for each matrix operand, the internal accumulation type, and the scalar parameters alpha and beta. cutlass::reference::host::TensorEquals() Compares two tensors of identical rank and returns true if values are bit equivalent. 
*/ // Standard Library includes #include <iostream> #include <sstream> #include <vector> #include <fstream> // CUTLASS includes needed for half-precision GEMM kernel #include "cutlass/cutlass.h" #include "cutlass/core_io.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/device/gemm.h" // // CUTLASS utility includes // // Defines operator<<() to write TensorView objects to std::ostream #include "cutlass/util/tensor_view_io.h" // Defines cutlass::HostTensor<> #include "cutlass/util/host_tensor.h" // Defines cutlass::half_t #include "cutlass/numeric_types.h" // Defines device_memory::copy_device_to_device() #include "cutlass/util/device_memory.h" // Defines cutlass::reference::device::TensorFillRandomGaussian() #include "cutlass/util/reference/device/tensor_fill.h" // Defines cutlass::reference::host::TensorEquals() #include "cutlass/util/reference/host/tensor_compare.h" // Defines cutlass::reference::host::Gemm() #include "cutlass/util/reference/host/gemm.h" #pragma warning( disable : 4503) /////////////////////////////////////////////////////////////////////////////////////////////////// /// Define a CUTLASS GEMM template and launch a GEMM kernel. 
hipError_t cutlass_hgemm_nn( int M, int N, int K, cutlass::half_t alpha, cutlass::half_t const *A, int lda, cutlass::half_t const *B, int ldb, cutlass::half_t beta, cutlass::half_t *C, int ldc) { // Define the GEMM operation using Gemm = cutlass::gemm::device::Gemm< cutlass::half_t, // ElementA cutlass::layout::ColumnMajor, // LayoutA cutlass::half_t, // ElementB cutlass::layout::ColumnMajor, // LayoutB cutlass::half_t, // ElementOutput cutlass::layout::ColumnMajor // LayoutOutput >; Gemm gemm_op; cutlass::cutStatus status = gemm_op({ {M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta} }); if (status != cutlass::cutStatus::kSuccess) { return hipErrorUnknown; } return hipSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocate several matrices in GPU device memory and call a single-precision /// CUTLASS GEMM kernel. hipError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) { hipError_t result; // // Construct cutlass::HostTensor<> using the half-precision host-side type. // // cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2 // tensors in column-major layout. Explicit synchronization methods are offered to copy the // tensor to the device or to the host. // // M-by-K matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K)); // K-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N)); // M-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N)); // M-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N)); // // Initialize matrices with small, random integers. // // Arbitrary RNG seed value. 
Hard-coded for deterministic results. uint64_t seed = 2080; // Gaussian random distribution cutlass::half_t mean = 0.0_hf; cutlass::half_t stddev = 5.0_hf; // Specify the number of bits right of the binary decimal that are permitted // to be non-zero. A value of "0" here truncates random values to integers int bits_less_than_one = 0; cutlass::reference::device::TensorFillRandomGaussian( A.device_view(), seed, mean, stddev, bits_less_than_one ); cutlass::reference::device::TensorFillRandomGaussian( B.device_view(), seed * 2019, mean, stddev, bits_less_than_one ); cutlass::reference::device::TensorFillRandomGaussian( C_cutlass.device_view(), seed * 1993, mean, stddev, bits_less_than_one ); // Copy C_cutlass into C_reference so the GEMM is correct when beta != 0. cutlass::device_memory::copy_device_to_device( C_reference.device_data(), C_cutlass.device_data(), C_cutlass.capacity()); // Copy the device-side view into host memory C_reference.sync_host(); // // Launch the CUTLASS GEMM kernel // result = cutlass_hgemm_nn( M, N, K, alpha, A.device_data(), A.stride(0), B.device_data(), B.stride(0), beta, C_cutlass.device_data(), C_cutlass.stride(0) ); if (result != hipSuccess) { return result; } // // Verify the result using a host-side reference // // A and B were initialized using device-side procedures. The intent of this example is to // use the host-side reference GEMM, so we must perform a device-to-host copy. A.sync_host(); B.sync_host(); // Copy CUTLASS's GEMM results into host memory. C_cutlass.sync_host(); // Compute the reference result using the host-side GEMM reference implementation. 
cutlass::reference::host::Gemm< cutlass::half_t, // ElementA cutlass::layout::ColumnMajor, // LayoutA cutlass::half_t, // ElementB cutlass::layout::ColumnMajor, // LayoutB cutlass::half_t, // ElementOutput cutlass::layout::ColumnMajor, // LayoutOutput cutlass::half_t, cutlass::half_t > gemm_ref; gemm_ref( {M, N, K}, // problem size (type: cutlass::gemm::GemmCoord) alpha, // alpha (type: cutlass::half_t) A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>) B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>) beta, // beta (type: cutlass::half_t) C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>) ); // Compare reference to computed results. if (!cutlass::reference::host::TensorEquals( C_reference.host_view(), C_cutlass.host_view())) { char const *filename = "errors_01_cutlass_utilities.csv"; std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl; // // On error, print C_cutlass and C_reference to std::cerr. // // Note, these are matrices of half-precision elements stored in host memory as // arrays of type cutlass::half_t. // std::ofstream file(filename); // Result of CUTLASS GEMM kernel file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl; // Result of reference computation file << "\n\nReference =\n" << C_reference.host_view() << std::endl; // Return error code. return hipErrorUnknown; } // Passed error check return hipSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to cutlass_utilities example. // // usage: // // 01_cutlass_utilities <M> <N> <K> <alpha> <beta> // int main(int argc, const char *arg[]) { // // This example uses half-precision and is only suitable for devices with compute capabitliy 5.3 or greater. 
// hipDeviceProp_t prop; hipError_t result = hipGetDeviceProperties(&prop, 0); if (result != hipSuccess) { std::cerr << "Failed to query device properties with error " << hipGetErrorString(result) << std::endl; return -1; } if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) { std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n"; std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl; return -1; } // // Parse the command line to obtain GEMM dimensions and scalar values. // // GEMM problem dimensions: <M> <N> <K> int problem[3] = { 128, 128, 128 }; for (int i = 1; i < argc && i < 4; ++i) { std::stringstream ss(arg[i]); ss >> problem[i - 1]; } // Linear scale factors in GEMM. Note, these are half-precision values stored as // cutlass::half_t. // // Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero. // cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf }; for (int i = 4; i < argc && i < 6; ++i) { std::stringstream ss(arg[i]); ss >> scalars[i - 4]; // lexical cast to cutlass::half_t } // // Run the CUTLASS GEMM test. // result = TestCutlassGemm( problem[0], // GEMM M dimension problem[1], // GEMM N dimension problem[2], // GEMM K dimension scalars[0], // alpha scalars[1] // beta ); if (result == hipSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == hipSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
eca988eecb265106297118bac5da3d243719aac9.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates several CUTLASS utilities in the context of a mixed-precision floating-point matrix product computation. 
These utilities are intended to be useful supporting components for managing tensor and matrix memory allocations, initializing and comparing results, and computing reference output. CUTLASS utilities are defined in the directory `tools/util`, and definitions appear namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have both host-side and device-side implementations, and the choice to use device-side initialization and host-side verification in this example was arbitrary. cutlass::half_t This is a numeric type implementing IEEE half-precision quantities. It is functional in host and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables harware-accelerated numeric conversion on x86-64 CPUs support F16C extensions. In device code, all available hardware is used to implement conversion and numeric operations. cutlass::HostTensor<> This template class simplifies the creation of tensors for all supported layouts. It simplifies allocation and management of host- and device- memory allocations. This class offers methods device_view() and host_view() to provide TensorView objects for device- and host-side memory allocations. cutlass::reference::device::TensorFillRandomGaussian() This template function initializes elementsof a tensor to a random Gaussian distribution. It uses cuRAND in device code to compute random numbers. cutlass::reference::host::Gemm<> This template function computes the general matrix product. This template supports unique data types for each matrix operand, the internal accumulation type, and the scalar parameters alpha and beta. cutlass::reference::host::TensorEquals() Compares two tensors of identical rank and returns true if values are bit equivalent. 
*/ // Standard Library includes #include <iostream> #include <sstream> #include <vector> #include <fstream> // CUTLASS includes needed for half-precision GEMM kernel #include "cutlass/cutlass.h" #include "cutlass/core_io.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/device/gemm.h" // // CUTLASS utility includes // // Defines operator<<() to write TensorView objects to std::ostream #include "cutlass/util/tensor_view_io.h" // Defines cutlass::HostTensor<> #include "cutlass/util/host_tensor.h" // Defines cutlass::half_t #include "cutlass/numeric_types.h" // Defines device_memory::copy_device_to_device() #include "cutlass/util/device_memory.h" // Defines cutlass::reference::device::TensorFillRandomGaussian() #include "cutlass/util/reference/device/tensor_fill.h" // Defines cutlass::reference::host::TensorEquals() #include "cutlass/util/reference/host/tensor_compare.h" // Defines cutlass::reference::host::Gemm() #include "cutlass/util/reference/host/gemm.h" #pragma warning( disable : 4503) /////////////////////////////////////////////////////////////////////////////////////////////////// /// Define a CUTLASS GEMM template and launch a GEMM kernel. 
cudaError_t cutlass_hgemm_nn( int M, int N, int K, cutlass::half_t alpha, cutlass::half_t const *A, int lda, cutlass::half_t const *B, int ldb, cutlass::half_t beta, cutlass::half_t *C, int ldc) { // Define the GEMM operation using Gemm = cutlass::gemm::device::Gemm< cutlass::half_t, // ElementA cutlass::layout::ColumnMajor, // LayoutA cutlass::half_t, // ElementB cutlass::layout::ColumnMajor, // LayoutB cutlass::half_t, // ElementOutput cutlass::layout::ColumnMajor // LayoutOutput >; Gemm gemm_op; cutlass::cutStatus status = gemm_op({ {M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta} }); if (status != cutlass::cutStatus::kSuccess) { return cudaErrorUnknown; } return cudaSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocate several matrices in GPU device memory and call a single-precision /// CUTLASS GEMM kernel. cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) { cudaError_t result; // // Construct cutlass::HostTensor<> using the half-precision host-side type. // // cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2 // tensors in column-major layout. Explicit synchronization methods are offered to copy the // tensor to the device or to the host. // // M-by-K matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K)); // K-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N)); // M-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N)); // M-by-N matrix of cutlass::half_t cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N)); // // Initialize matrices with small, random integers. // // Arbitrary RNG seed value. 
Hard-coded for deterministic results. uint64_t seed = 2080; // Gaussian random distribution cutlass::half_t mean = 0.0_hf; cutlass::half_t stddev = 5.0_hf; // Specify the number of bits right of the binary decimal that are permitted // to be non-zero. A value of "0" here truncates random values to integers int bits_less_than_one = 0; cutlass::reference::device::TensorFillRandomGaussian( A.device_view(), seed, mean, stddev, bits_less_than_one ); cutlass::reference::device::TensorFillRandomGaussian( B.device_view(), seed * 2019, mean, stddev, bits_less_than_one ); cutlass::reference::device::TensorFillRandomGaussian( C_cutlass.device_view(), seed * 1993, mean, stddev, bits_less_than_one ); // Copy C_cutlass into C_reference so the GEMM is correct when beta != 0. cutlass::device_memory::copy_device_to_device( C_reference.device_data(), C_cutlass.device_data(), C_cutlass.capacity()); // Copy the device-side view into host memory C_reference.sync_host(); // // Launch the CUTLASS GEMM kernel // result = cutlass_hgemm_nn( M, N, K, alpha, A.device_data(), A.stride(0), B.device_data(), B.stride(0), beta, C_cutlass.device_data(), C_cutlass.stride(0) ); if (result != cudaSuccess) { return result; } // // Verify the result using a host-side reference // // A and B were initialized using device-side procedures. The intent of this example is to // use the host-side reference GEMM, so we must perform a device-to-host copy. A.sync_host(); B.sync_host(); // Copy CUTLASS's GEMM results into host memory. C_cutlass.sync_host(); // Compute the reference result using the host-side GEMM reference implementation. 
cutlass::reference::host::Gemm< cutlass::half_t, // ElementA cutlass::layout::ColumnMajor, // LayoutA cutlass::half_t, // ElementB cutlass::layout::ColumnMajor, // LayoutB cutlass::half_t, // ElementOutput cutlass::layout::ColumnMajor, // LayoutOutput cutlass::half_t, cutlass::half_t > gemm_ref; gemm_ref( {M, N, K}, // problem size (type: cutlass::gemm::GemmCoord) alpha, // alpha (type: cutlass::half_t) A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>) B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>) beta, // beta (type: cutlass::half_t) C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>) ); // Compare reference to computed results. if (!cutlass::reference::host::TensorEquals( C_reference.host_view(), C_cutlass.host_view())) { char const *filename = "errors_01_cutlass_utilities.csv"; std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl; // // On error, print C_cutlass and C_reference to std::cerr. // // Note, these are matrices of half-precision elements stored in host memory as // arrays of type cutlass::half_t. // std::ofstream file(filename); // Result of CUTLASS GEMM kernel file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl; // Result of reference computation file << "\n\nReference =\n" << C_reference.host_view() << std::endl; // Return error code. return cudaErrorUnknown; } // Passed error check return cudaSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to cutlass_utilities example. // // usage: // // 01_cutlass_utilities <M> <N> <K> <alpha> <beta> // int main(int argc, const char *arg[]) { // // This example uses half-precision and is only suitable for devices with compute capabitliy 5.3 or greater. 
// cudaDeviceProp prop; cudaError_t result = cudaGetDeviceProperties(&prop, 0); if (result != cudaSuccess) { std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl; return -1; } if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) { std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n"; std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl; return -1; } // // Parse the command line to obtain GEMM dimensions and scalar values. // // GEMM problem dimensions: <M> <N> <K> int problem[3] = { 128, 128, 128 }; for (int i = 1; i < argc && i < 4; ++i) { std::stringstream ss(arg[i]); ss >> problem[i - 1]; } // Linear scale factors in GEMM. Note, these are half-precision values stored as // cutlass::half_t. // // Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero. // cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf }; for (int i = 4; i < argc && i < 6; ++i) { std::stringstream ss(arg[i]); ss >> scalars[i - 4]; // lexical cast to cutlass::half_t } // // Run the CUTLASS GEMM test. // result = TestCutlassGemm( problem[0], // GEMM M dimension problem[1], // GEMM N dimension problem[2], // GEMM K dimension scalars[0], // alpha scalars[1] // beta ); if (result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == cudaSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
e05081ecac6b9c5f74674d91d61cdf3ba175e982.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _reg_blockMatching_kernels.cu * * * Created by Marc Modat and Pankaj Daga on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef __REG_BLOCKMATCHING_KERNELS_CU__ #define __REG_BLOCKMATCHING_KERNELS_CU__ // Some parameters that we need for the kernel execution. // The caller is supposed to ensure that the values are set // Number of blocks in each dimension __device__ __constant__ int3 c_BlockDim; __device__ __constant__ int c_StepSize; __device__ __constant__ int3 c_ImageSize; __device__ __constant__ float r1c1; // Transformation matrix from nifti header __device__ __constant__ float4 t_m_a; __device__ __constant__ float4 t_m_b; __device__ __constant__ float4 t_m_c; #define BLOCK_WIDTH 4 #define BLOCK_SIZE 64 #define OVERLAP_SIZE 3 #define STEP_SIZE 1 #include "_reg_blockMatching_gpu.h" texture<float, 1, hipReadModeElementType> targetImageArray_texture; texture<float, 1, hipReadModeElementType> resultImageArray_texture; texture<int, 1, hipReadModeElementType> activeBlock_texture; // Apply the transformation matrix __device__ inline void apply_affine(const float4 &pt, float * result) { float4 mat = t_m_a; result[0] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); mat = t_m_b; result[1] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); mat = t_m_c; result[2] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); } // CUDA kernel to process the target values __global__ void process_target_blocks_gpu(float *targetPosition_d, float *targetValues) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; const int3 bDim = c_BlockDim; if (tid < bDim.x * bDim.y * bDim.z){ const int currentBlockIndex = tex1Dfetch(activeBlock_texture,tid); if (currentBlockIndex >= 0){ // Get the corresponding (i, j, k) 
indices int tempIndex = currentBlockIndex; const int k =(int)(tempIndex/(bDim.x * bDim.y)); tempIndex -= k * bDim.x * bDim.y; const int j =(int)(tempIndex/(bDim.x)); const int i = tempIndex - j * (bDim.x); const int offset = tid * BLOCK_SIZE; const int targetIndex_start_x = i * BLOCK_WIDTH; const int targetIndex_start_y = j * BLOCK_WIDTH; const int targetIndex_start_z = k * BLOCK_WIDTH; int targetIndex_end_x = targetIndex_start_x + BLOCK_WIDTH; int targetIndex_end_y = targetIndex_start_y + BLOCK_WIDTH; int targetIndex_end_z = targetIndex_start_z + BLOCK_WIDTH; const int3 imageSize = c_ImageSize; for (int count = 0; count < BLOCK_SIZE; ++count) targetValues[count + offset] = 0.0f; unsigned int index = 0; for(int z = targetIndex_start_z; z< targetIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.x * imageSize.y; for(int y = targetIndex_start_y; y < targetIndex_end_y; ++y){ if (y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + targetIndex_start_x; for(int x = targetIndex_start_x; x < targetIndex_end_x; ++x){ if(x>=0 && x<imageSize.x) { targetValues[index + offset] = tex1Dfetch(targetImageArray_texture, indexXYZ); } indexXYZ++; index++; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } float4 targetPosition; targetPosition.x = i * BLOCK_WIDTH; targetPosition.y = j * BLOCK_WIDTH; targetPosition.z = k * BLOCK_WIDTH; apply_affine(targetPosition, &(targetPosition_d[tid * 3])); } } } // CUDA kernel to process the result blocks __global__ void process_result_blocks_gpu(float *resultPosition_d, float *targetValues) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; const int3 bDim = c_BlockDim; int tempIndex = tid % NUM_BLOCKS_TO_COMPARE; __shared__ int ctid; if (tempIndex == 0) ctid = (int)(tid / NUM_BLOCKS_TO_COMPARE); __syncthreads(); //const int ctid = (int)(tid / NUM_BLOCKS_TO_COMPARE); __shared__ float4 localCC [NUM_BLOCKS_TO_COMPARE]; __shared__ int3 indexes; 
localCC[tempIndex] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); __shared__ int updateThreadID; updateThreadID = -1; if (ctid < bDim.x * bDim.y * bDim.z) { const int activeBlockIndex = tex1Dfetch(activeBlock_texture, ctid); tempIndex = activeBlockIndex; int k =(int)(tempIndex/(bDim.x * bDim.y)); tempIndex -= k * bDim.x * bDim.y; int j =(int)(tempIndex/(bDim.x)); int i = tempIndex - j * (bDim.x); tempIndex = tid % NUM_BLOCKS_TO_COMPARE; if (tempIndex == 0) { indexes.x = i * BLOCK_WIDTH; indexes.y = j * BLOCK_WIDTH; indexes.z = k * BLOCK_WIDTH; } __syncthreads(); if (activeBlockIndex >= 0) { const int block_offset = ctid * BLOCK_SIZE; const int3 imageSize = c_ImageSize; int k = (int)tempIndex /NUM_BLOCKS_TO_COMPARE_2D; tempIndex -= k * NUM_BLOCKS_TO_COMPARE_2D; int j = (int)tempIndex /NUM_BLOCKS_TO_COMPARE_1D; int i = tempIndex - j * NUM_BLOCKS_TO_COMPARE_1D; k -= OVERLAP_SIZE; j -= OVERLAP_SIZE; i -= OVERLAP_SIZE; tempIndex = tid % NUM_BLOCKS_TO_COMPARE; int resultIndex_start_z = indexes.z + k; int resultIndex_end_z = resultIndex_start_z + BLOCK_WIDTH; int resultIndex_start_y = indexes.y + j; int resultIndex_end_y = resultIndex_start_y + BLOCK_WIDTH; int resultIndex_start_x = indexes.x + i; int resultIndex_end_x = resultIndex_start_x + BLOCK_WIDTH; __shared__ float4 cc_vars [NUM_BLOCKS_TO_COMPARE]; cc_vars[tempIndex].x = 0.0f; cc_vars[tempIndex].y = 0.0f; unsigned int index = 0; for(int z = resultIndex_start_z; z< resultIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.y * imageSize.x; for(int y = resultIndex_start_y; y < resultIndex_end_y; ++y){ if (y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + resultIndex_start_x; for(int x = resultIndex_start_x; x < resultIndex_end_x; ++x){ if (x>=0 && x<imageSize.x) { cc_vars[tempIndex].x = tex1Dfetch(resultImageArray_texture, indexXYZ); cc_vars[tempIndex].y = targetValues[block_offset + index]; if (cc_vars[tempIndex].x != 0.0f && cc_vars[tempIndex].y != 0.0f) { localCC[tempIndex].x += 
cc_vars[tempIndex].x; localCC[tempIndex].y += cc_vars[tempIndex].y; ++localCC[tempIndex].z; } } ++indexXYZ; ++index; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } if (localCC[tempIndex].z > 0) { localCC[tempIndex].x /= localCC[tempIndex].z; localCC[tempIndex].y /= localCC[tempIndex].z; } cc_vars[tempIndex].z = 0.0f; cc_vars[tempIndex].w = 0.0f; index = 0; for(int z = resultIndex_start_z; z< resultIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.y * imageSize.x; for(int y = resultIndex_start_y; y < resultIndex_end_y; ++y){ if(y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + resultIndex_start_x; for(int x = resultIndex_start_x; x < resultIndex_end_x; ++x){ if (x>=0 && x<imageSize.x) { cc_vars[tempIndex].x = tex1Dfetch(resultImageArray_texture, indexXYZ); cc_vars[tempIndex].y = targetValues[block_offset + index]; if (cc_vars[tempIndex].x != 0.0f && cc_vars[tempIndex].y != 0.0f) { cc_vars[tempIndex].x -= localCC[tempIndex].x; cc_vars[tempIndex].y -= localCC[tempIndex].y; cc_vars[tempIndex].z += cc_vars[tempIndex].x * cc_vars[tempIndex].x; cc_vars[tempIndex].w += cc_vars[tempIndex].y * cc_vars[tempIndex].y; localCC[tempIndex].w += cc_vars[tempIndex].x * cc_vars[tempIndex].y; } } ++indexXYZ; ++index; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } if (localCC[tempIndex].z > (float)(BLOCK_SIZE/2)) { if (cc_vars[tempIndex].z > 0.0f && cc_vars[tempIndex].w > 0.0f) { localCC[tempIndex].w = fabsf(localCC[tempIndex].w/sqrt(cc_vars[tempIndex].z * cc_vars[tempIndex].w)); } } else { localCC[tempIndex].w = 0.0f; } localCC[tempIndex].x = i; localCC[tempIndex].y = j; localCC[tempIndex].z = k; // Just take ownership of updating the final value if (updateThreadID == -1) updateThreadID = tid; } __syncthreads(); // Just let one thread do the final update if (tid == updateThreadID) { __shared__ float4 bestCC; bestCC = make_float4(0.0f, 0.0f, 0.0f, 0.0f); for (int i = 0; i < 
NUM_BLOCKS_TO_COMPARE; ++i) { if (localCC[i].w > bestCC.w) { bestCC.x = localCC[i].x; bestCC.y = localCC[i].y; bestCC.z = localCC[i].z; bestCC.w = localCC[i].w; } } bestCC.x += indexes.x; bestCC.y += indexes.y; bestCC.z += indexes.z; apply_affine(bestCC, &(resultPosition_d[ctid * 3])); } } } #endif
e05081ecac6b9c5f74674d91d61cdf3ba175e982.cu
/* * _reg_blockMatching_kernels.cu * * * Created by Marc Modat and Pankaj Daga on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef __REG_BLOCKMATCHING_KERNELS_CU__ #define __REG_BLOCKMATCHING_KERNELS_CU__ // Some parameters that we need for the kernel execution. // The caller is supposed to ensure that the values are set // Number of blocks in each dimension __device__ __constant__ int3 c_BlockDim; __device__ __constant__ int c_StepSize; __device__ __constant__ int3 c_ImageSize; __device__ __constant__ float r1c1; // Transformation matrix from nifti header __device__ __constant__ float4 t_m_a; __device__ __constant__ float4 t_m_b; __device__ __constant__ float4 t_m_c; #define BLOCK_WIDTH 4 #define BLOCK_SIZE 64 #define OVERLAP_SIZE 3 #define STEP_SIZE 1 #include "_reg_blockMatching_gpu.h" texture<float, 1, cudaReadModeElementType> targetImageArray_texture; texture<float, 1, cudaReadModeElementType> resultImageArray_texture; texture<int, 1, cudaReadModeElementType> activeBlock_texture; // Apply the transformation matrix __device__ inline void apply_affine(const float4 &pt, float * result) { float4 mat = t_m_a; result[0] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); mat = t_m_b; result[1] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); mat = t_m_c; result[2] = (mat.x * pt.x) + (mat.y*pt.y) + (mat.z*pt.z) + (mat.w); } // CUDA kernel to process the target values __global__ void process_target_blocks_gpu(float *targetPosition_d, float *targetValues) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; const int3 bDim = c_BlockDim; if (tid < bDim.x * bDim.y * bDim.z){ const int currentBlockIndex = tex1Dfetch(activeBlock_texture,tid); if (currentBlockIndex >= 0){ // Get the corresponding (i, j, k) indices int tempIndex = currentBlockIndex; const int k =(int)(tempIndex/(bDim.x * 
bDim.y)); tempIndex -= k * bDim.x * bDim.y; const int j =(int)(tempIndex/(bDim.x)); const int i = tempIndex - j * (bDim.x); const int offset = tid * BLOCK_SIZE; const int targetIndex_start_x = i * BLOCK_WIDTH; const int targetIndex_start_y = j * BLOCK_WIDTH; const int targetIndex_start_z = k * BLOCK_WIDTH; int targetIndex_end_x = targetIndex_start_x + BLOCK_WIDTH; int targetIndex_end_y = targetIndex_start_y + BLOCK_WIDTH; int targetIndex_end_z = targetIndex_start_z + BLOCK_WIDTH; const int3 imageSize = c_ImageSize; for (int count = 0; count < BLOCK_SIZE; ++count) targetValues[count + offset] = 0.0f; unsigned int index = 0; for(int z = targetIndex_start_z; z< targetIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.x * imageSize.y; for(int y = targetIndex_start_y; y < targetIndex_end_y; ++y){ if (y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + targetIndex_start_x; for(int x = targetIndex_start_x; x < targetIndex_end_x; ++x){ if(x>=0 && x<imageSize.x) { targetValues[index + offset] = tex1Dfetch(targetImageArray_texture, indexXYZ); } indexXYZ++; index++; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } float4 targetPosition; targetPosition.x = i * BLOCK_WIDTH; targetPosition.y = j * BLOCK_WIDTH; targetPosition.z = k * BLOCK_WIDTH; apply_affine(targetPosition, &(targetPosition_d[tid * 3])); } } } // CUDA kernel to process the result blocks __global__ void process_result_blocks_gpu(float *resultPosition_d, float *targetValues) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; const int3 bDim = c_BlockDim; int tempIndex = tid % NUM_BLOCKS_TO_COMPARE; __shared__ int ctid; if (tempIndex == 0) ctid = (int)(tid / NUM_BLOCKS_TO_COMPARE); __syncthreads(); //const int ctid = (int)(tid / NUM_BLOCKS_TO_COMPARE); __shared__ float4 localCC [NUM_BLOCKS_TO_COMPARE]; __shared__ int3 indexes; localCC[tempIndex] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); __shared__ int updateThreadID; 
updateThreadID = -1; if (ctid < bDim.x * bDim.y * bDim.z) { const int activeBlockIndex = tex1Dfetch(activeBlock_texture, ctid); tempIndex = activeBlockIndex; int k =(int)(tempIndex/(bDim.x * bDim.y)); tempIndex -= k * bDim.x * bDim.y; int j =(int)(tempIndex/(bDim.x)); int i = tempIndex - j * (bDim.x); tempIndex = tid % NUM_BLOCKS_TO_COMPARE; if (tempIndex == 0) { indexes.x = i * BLOCK_WIDTH; indexes.y = j * BLOCK_WIDTH; indexes.z = k * BLOCK_WIDTH; } __syncthreads(); if (activeBlockIndex >= 0) { const int block_offset = ctid * BLOCK_SIZE; const int3 imageSize = c_ImageSize; int k = (int)tempIndex /NUM_BLOCKS_TO_COMPARE_2D; tempIndex -= k * NUM_BLOCKS_TO_COMPARE_2D; int j = (int)tempIndex /NUM_BLOCKS_TO_COMPARE_1D; int i = tempIndex - j * NUM_BLOCKS_TO_COMPARE_1D; k -= OVERLAP_SIZE; j -= OVERLAP_SIZE; i -= OVERLAP_SIZE; tempIndex = tid % NUM_BLOCKS_TO_COMPARE; int resultIndex_start_z = indexes.z + k; int resultIndex_end_z = resultIndex_start_z + BLOCK_WIDTH; int resultIndex_start_y = indexes.y + j; int resultIndex_end_y = resultIndex_start_y + BLOCK_WIDTH; int resultIndex_start_x = indexes.x + i; int resultIndex_end_x = resultIndex_start_x + BLOCK_WIDTH; __shared__ float4 cc_vars [NUM_BLOCKS_TO_COMPARE]; cc_vars[tempIndex].x = 0.0f; cc_vars[tempIndex].y = 0.0f; unsigned int index = 0; for(int z = resultIndex_start_z; z< resultIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.y * imageSize.x; for(int y = resultIndex_start_y; y < resultIndex_end_y; ++y){ if (y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + resultIndex_start_x; for(int x = resultIndex_start_x; x < resultIndex_end_x; ++x){ if (x>=0 && x<imageSize.x) { cc_vars[tempIndex].x = tex1Dfetch(resultImageArray_texture, indexXYZ); cc_vars[tempIndex].y = targetValues[block_offset + index]; if (cc_vars[tempIndex].x != 0.0f && cc_vars[tempIndex].y != 0.0f) { localCC[tempIndex].x += cc_vars[tempIndex].x; localCC[tempIndex].y += cc_vars[tempIndex].y; ++localCC[tempIndex].z; } 
} ++indexXYZ; ++index; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } if (localCC[tempIndex].z > 0) { localCC[tempIndex].x /= localCC[tempIndex].z; localCC[tempIndex].y /= localCC[tempIndex].z; } cc_vars[tempIndex].z = 0.0f; cc_vars[tempIndex].w = 0.0f; index = 0; for(int z = resultIndex_start_z; z< resultIndex_end_z; ++z){ if (z>=0 && z<imageSize.z) { int indexZ = z * imageSize.y * imageSize.x; for(int y = resultIndex_start_y; y < resultIndex_end_y; ++y){ if(y>=0 && y<imageSize.y) { int indexXYZ = indexZ + y * imageSize.x + resultIndex_start_x; for(int x = resultIndex_start_x; x < resultIndex_end_x; ++x){ if (x>=0 && x<imageSize.x) { cc_vars[tempIndex].x = tex1Dfetch(resultImageArray_texture, indexXYZ); cc_vars[tempIndex].y = targetValues[block_offset + index]; if (cc_vars[tempIndex].x != 0.0f && cc_vars[tempIndex].y != 0.0f) { cc_vars[tempIndex].x -= localCC[tempIndex].x; cc_vars[tempIndex].y -= localCC[tempIndex].y; cc_vars[tempIndex].z += cc_vars[tempIndex].x * cc_vars[tempIndex].x; cc_vars[tempIndex].w += cc_vars[tempIndex].y * cc_vars[tempIndex].y; localCC[tempIndex].w += cc_vars[tempIndex].x * cc_vars[tempIndex].y; } } ++indexXYZ; ++index; } } else index += BLOCK_WIDTH; } } else index += BLOCK_WIDTH * BLOCK_WIDTH; } if (localCC[tempIndex].z > (float)(BLOCK_SIZE/2)) { if (cc_vars[tempIndex].z > 0.0f && cc_vars[tempIndex].w > 0.0f) { localCC[tempIndex].w = fabsf(localCC[tempIndex].w/sqrt(cc_vars[tempIndex].z * cc_vars[tempIndex].w)); } } else { localCC[tempIndex].w = 0.0f; } localCC[tempIndex].x = i; localCC[tempIndex].y = j; localCC[tempIndex].z = k; // Just take ownership of updating the final value if (updateThreadID == -1) updateThreadID = tid; } __syncthreads(); // Just let one thread do the final update if (tid == updateThreadID) { __shared__ float4 bestCC; bestCC = make_float4(0.0f, 0.0f, 0.0f, 0.0f); for (int i = 0; i < NUM_BLOCKS_TO_COMPARE; ++i) { if (localCC[i].w > bestCC.w) { bestCC.x = localCC[i].x; bestCC.y = 
localCC[i].y; bestCC.z = localCC[i].z; bestCC.w = localCC[i].w; } } bestCC.x += indexes.x; bestCC.y += indexes.y; bestCC.z += indexes.z; apply_affine(bestCC, &(resultPosition_d[ctid * 3])); } } } #endif
8eefaa4d95ce084b191ba527d1c5f2c15ede5c8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <iostream> // endl # include <fstream> // for ofstream # include <string> // for string # include <sstream> // for stringstream # include <math.h> # include "PFSips.h" # include "PFSipsKernels.h" # include "../utils/cudaErrorMacros.h" // for cudaCheckErrors & cudaCheckAsyncErrors using std::string; using std::stringstream; using std::cout; using std::endl; using std::ofstream; // ------------------------------------------------------------------------- // Constructor: // ------------------------------------------------------------------------- PFSips::PFSips(const GetPot& input_params) : rng(1234) { // --------------------------------------- // Assign variables from 'input_params': // --------------------------------------- nx = input_params("Domain/nx",1); ny = input_params("Domain/ny",1); nz = input_params("Domain/nz",1); nxyz = nx*ny*nz; dx = input_params("Domain/dx",1.0); dy = input_params("Domain/dy",1.0); dz = input_params("Domain/dz",1.0); dt = input_params("Time/dt",1.0); bx = input_params("PFSips/bx",0); by = input_params("PFSips/by",1); bz = input_params("PFSips/bz",1); numSteps = input_params("Time/nstep",1); co = input_params("PFSips/co",0.20); M = input_params("PFSips/M",1.0); mobReSize = input_params("PFSips/mobReSize",0.35); kap = input_params("PFSips/kap",1.0); water_CB = input_params("PFSips/water_CB",1.0); NS_in_dope = input_params("PFSips/NS_in_dope",0.0); mobReSize = input_params("PFSips/mobReSize",0.35); chiPS = input_params("PFSips/chiPS",0.034); chiPN = input_params("PFSips/chiPN",1.5); phiCutoff = input_params("PFSips/phiCutoff",0.75); N = input_params("PFSips/N",100.0); A = input_params("PFSips/A",1.0); Tinit = input_params("PFSips/Tinit",298.0); Tcast = input_params("PFSips/Tcast",298.0); noiseStr = input_params("PFSips/noiseStr",0.1); D0 = input_params("PFSips/D0",1.0); Dw = input_params("PFSips/Dw",1.0); nu = input_params("PFSips/nu",1.0); 
nuDw = input_params("PFSips/nuDw",1.0); gamma = input_params("PFSips/gamma",1.0); gammaDw = input_params("PFSips/gammaDw",1.0); Mweight = input_params("PFSips/Mweight",100.0); Mvolume = input_params("PFSips/Mvolume",0.1); numOutputs = input_params("Output/numOutputs",1); outInterval = numSteps/numOutputs; // --------------------------------------- // Set up cuda kernel launch variables: // --------------------------------------- blockSize.x = input_params("GPU/blockSize.x",0); blockSize.y = input_params("GPU/blockSize.y",0); blockSize.z = input_params("GPU/blockSize.z",0); // set default kernel launch parameters if(blockSize.x == 0) blockSize.x = 32; if(blockSize.y == 0) blockSize.y = 32; if(blockSize.z == 0) blockSize.z = 1; // calculate the number of blocks to be used (3-D block grid) int totalBlockSize = blockSize.x*blockSize.y*blockSize.z; blocks.x = (nx + blockSize.x - 1)/blockSize.x; blocks.y = (ny + blockSize.y - 1)/blockSize.y; blocks.z = (nz + blockSize.z - 1)/blockSize.z; // perform some assumption checking int numBlocks = blocks.x*blocks.y*blocks.z; int totalNumThreads = numBlocks*totalBlockSize; if(totalNumThreads < nxyz) throw "GPU Kernel Launch setup lacks sufficient threads!\n"; if(totalBlockSize > 1024) throw "Total number of threads per block exceeds 1024"; } // ------------------------------------------------------------------------- // Destructor: // ------------------------------------------------------------------------- PFSips::~PFSips() { // ---------------------------------------- // free up device memory: // ---------------------------------------- hipFree(c_d); hipFree(df_d); hipFree(Mob_d); hipFree(w_d); hipFree(muNS_d); hipFree(nonUniformLap_d); hipFree(cpyBuff_d); hipFree(devState); } // ------------------------------------------------------------------------- // Initialize system: // ------------------------------------------------------------------------- void PFSips::initSystem() { // ---------------------------------------- // 
Initialize concentration fields: // ---------------------------------------- srand(time(NULL)); // setting the seed double r = 0.0; for(size_t i=0;i<nxyz;i++) { r = (double)rand()/RAND_MAX; // initialize polymer phase c.push_back(co + 0.1*(r-0.5)); // initialize nonsolvent phase water.push_back(NS_in_dope); } // ---------------------------------------- // Allocate memory on device and copy data // and copy data from host to device // ---------------------------------------- // allocate memory on device size = nxyz*sizeof(double); // allocate polymer species hipMalloc((void**) &c_d,size); cudaCheckErrors("hipMalloc fail"); // allocate space for laplacian hipMalloc((void**) &df_d,size); cudaCheckErrors("hipMalloc fail"); // allocate water concentration hipMalloc((void**) &w_d,size); cudaCheckErrors("hipMalloc fail"); // allocate space for laplacian hipMalloc((void**) &muNS_d,size); cudaCheckErrors("hipMalloc fail"); // copy buffer hipMalloc((void**) &cpyBuff_d,size); cudaCheckErrors("hipMalloc fail"); // allocate mobility hipMalloc((void**) &Mob_d,size); cudaCheckErrors("hipMalloc fail"); // allocate nonuniform laplacian for mobility // and water diffusion coefficient hipMalloc((void**) &nonUniformLap_d,size); cudaCheckErrors("hipMalloc fail"); // allocate memory for cuRAND state hipMalloc((void**) &devState,nxyz*sizeof(hiprandState_t)); cudaCheckErrors("hipMalloc fail"); // copy concentration and water array to device hipMemcpy(c_d,&c[0],size,hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D fail"); hipMemcpy(w_d,&water[0],size,hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D fail"); // ---------------------------------------- // Initialize thermal fluctuations of // polymer concentration // ---------------------------------------- hipLaunchKernelGGL(( init_cuRAND), dim3(blocks),dim3(blockSize), 0, 0, time(NULL),devState,nx,ny,nz); } // ------------------------------------------------------------------------- // Take one step forward in time: // 
------------------------------------------------------------------------- void PFSips::computeInterval(int interval) { // ---------------------------------------- // Set the time step: // ---------------------------------------- current_step = interval*outInterval; // ---------------------------------------- // Evolve system by solving CH equation: // ---------------------------------------- for(size_t i=0;i<outInterval;i++) { // calculate the laplacian of c_d and store in df_d hipLaunchKernelGGL(( calculateLapBoundaries), dim3(blocks),dim3(blockSize), 0, 0, c_d,df_d,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("calculateLap polymer kernel fail"); hipDeviceSynchronize(); // calculate the chemical potential and store in df_d hipLaunchKernelGGL(( calculateChemPotFH), dim3(blocks),dim3(blockSize), 0, 0, c_d,w_d,df_d,kap,A,chiPS,chiPN,N,nx,ny,nz,current_step,dt); cudaCheckAsyncErrors("calculateChemPotFH kernel fail"); hipDeviceSynchronize(); // calculate mobility and store it in Mob_d hipLaunchKernelGGL(( calculateMobility), dim3(blocks),dim3(blockSize), 0, 0, c_d,Mob_d,M,mobReSize,nx,ny,nz,phiCutoff,N,gamma,nu,D0,Mweight,Mvolume,Tcast); cudaCheckAsyncErrors("calculateMobility kernel fail"); hipDeviceSynchronize(); // calculate the laplacian of the chemical potential, then update c_d // using an Euler update hipLaunchKernelGGL(( lapChemPotAndUpdateBoundaries), dim3(blocks),dim3(blockSize), 0, 0, c_d,df_d,Mob_d,nonUniformLap_d, dt,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("lapChemPotAndUpdateBoundaries kernel fail"); hipDeviceSynchronize(); // calculate mu for Nonsolvent diffusion hipLaunchKernelGGL(( calculate_muNS), dim3(blocks),dim3(blockSize), 0, 0, w_d,c_d,muNS_d,Mob_d,Dw,water_CB,gammaDw,nuDw,Mweight,Mvolume,nx,ny,nz); cudaCheckAsyncErrors('calculate muNS kernel fail'); hipDeviceSynchronize(); // calculate laplacian for diffusing water hipLaunchKernelGGL(( calculateLapBoundaries_muNS), dim3(blocks),dim3(blockSize), 0, 0, df_d,muNS_d,nx,ny,nz,dx,bx,by,bz); 
cudaCheckAsyncErrors('calculateLap water kernel fail'); hipDeviceSynchronize(); // calculate nonuniform laplacian for diffusion hipLaunchKernelGGL(( calculateNonUniformLapBoundaries_muNS), dim3(blocks),dim3(blockSize), 0, 0, muNS_d,Mob_d,nonUniformLap_d,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors('calculateNonUniformLap muNS kernel fail'); hipDeviceSynchronize(); // euler update water diffusing hipLaunchKernelGGL(( update_water), dim3(blocks),dim3(blockSize), 0, 0, w_d,df_d,Mob_d,nonUniformLap_d,dt,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("updateWater kernel fail"); hipDeviceSynchronize(); // add thermal fluctuations of polymer concentration hipLaunchKernelGGL(( addNoise), dim3(blocks),dim3(blockSize), 0, 0, c_d, nx, ny, nz, dt, current_step, water_CB, phiCutoff, devState); cudaCheckAsyncErrors("addNoise kernel fail"); hipDeviceSynchronize(); } // ---------------------------------------- // Copy data back to host for writing: // ---------------------------------------- // polymer concentration hipLaunchKernelGGL(( populateCopyBufferSIPS), dim3(blocks),dim3(blockSize), 0, 0, c_d,cpyBuff_d,nx,ny,nz); hipMemcpyAsync(&c[0],c_d,size,hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpyAsync D2H fail"); hipDeviceSynchronize(); // nonsolvent concentration hipLaunchKernelGGL(( populateCopyBufferSIPS), dim3(blocks),dim3(blockSize), 0, 0, w_d,cpyBuff_d,nx,ny,nz); hipMemcpyAsync(&water[0],w_d,size,hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpyAsync D2H fail"); hipDeviceSynchronize(); } // ------------------------------------------------------------------------- // Write output: // ------------------------------------------------------------------------- void PFSips::writeOutput(int step) { // ----------------------------------- // Define the file location and name: // ----------------------------------- ofstream outfile; ofstream outfile2; stringstream filenamecombine; stringstream filenamecombine2; filenamecombine << "vtkoutput/c_" << step << ".vtk"; string filename = 
filenamecombine.str(); outfile.open(filename.c_str(), std::ios::out); // ----------------------------------- // Write the 'vtk' file header: // ----------------------------------- string d = " "; outfile << "# vtk DataFile Version 3.1" << endl; outfile << "VTK file containing grid data" << endl; outfile << "ASCII" << endl; outfile << " " << endl; outfile << "DATASET STRUCTURED_POINTS" << endl; outfile << "DIMENSIONS" << d << nx << d << ny << d << nz << endl; outfile << "ORIGIN " << d << 0 << d << 0 << d << 0 << endl; outfile << "SPACING" << d << 1.0 << d << 1.0 << d << 1.0 << endl; outfile << " " << endl; outfile << "POINT_DATA " << nxyz << endl; outfile << "SCALARS c float" << endl; outfile << "LOOKUP_TABLE default" << endl; // ----------------------------------- // Write the data: // NOTE: x-data increases fastest, // then y-data, then z-data // ----------------------------------- for(size_t k=0;k<nz;k++) for(size_t j=0;j<ny;j++) for(size_t i=0;i<nx;i++) { int id = nx*ny*k + nx*j + i; double point = c[id]; //if (point < 1e-10) point = 0.0; // making really small numbers == 0 outfile << point << endl; } // ----------------------------------- // Close the file: // ----------------------------------- outfile.close(); // vtkoutput for water // ----------------------------------- // Define the file location and name: // ----------------------------------- filenamecombine2 << "vtkoutput/w_" << step << ".vtk"; string filename2 = filenamecombine2.str(); outfile2.open(filename2.c_str(), std::ios::out); // ----------------------------------- // Write the 'vtk' file header: // ----------------------------------- outfile2 << "# vtk DataFile Version 3.1" << endl; outfile2 << "VTK file containing grid data" << endl; outfile2 << "ASCII" << endl; outfile2 << " " << endl; outfile2 << "DATASET STRUCTURED_POINTS" << endl; outfile2 << "DIMENSIONS" << d << nx << d << ny << d << nz << endl; outfile2 << "ORIGIN " << d << 0 << d << 0 << d << 0 << endl; outfile2 << "SPACING" << d << 1.0 
<< d << 1.0 << d << 1.0 << endl; outfile2 << " " << endl; outfile2 << "POINT_DATA " << nxyz << endl; outfile2 << "SCALARS w float" << endl; outfile2 << "LOOKUP_TABLE default" << endl; // ----------------------------------- // Write the data: // NOTE: x-data increases fastest, // then y-data, then z-data // ----------------------------------- for(size_t k=0;k<nz;k++) for(size_t j=0;j<ny;j++) for(size_t i=0;i<nx;i++) { int id = nx*ny*k + nx*j + i; double point = water[id]; // for paraview if (point < 1e-30) point = 0.0; // making really small numbers == 0 outfile2 << point << endl; } // ----------------------------------- // Close the file: // ----------------------------------- outfile2.close(); } // ------------------------------------------------------------------------- // Run unit tests for this App: // ------------------------------------------------------------------------- void PFSips::runUnitTests() { bool pass; pass = lapKernUnitTest(); if(pass) cout << "\t- lapKernUnitTest -------------- PASSED\n"; else cout << "\t- lapKernUnitTest -------------- FAILED\n"; } // ------------------------------------------------------------------------- // Unit tests for this App: // ------------------------------------------------------------------------- bool PFSips::lapKernUnitTest() { // 3X3X3 scalar field with ones except the central node double sf[27] = {1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1}; double solution[27] = {0,0,0,0,-1,0,0,0,0,0,-1,0,-1,6,-1,0,-1,0,0,0,0,0,-1,0,0,0,0}; // allocate space on device double* sf_d; hipMalloc((void**) &sf_d,27*sizeof(double)); cudaCheckErrors("hipMalloc fail"); // copy sf to device hipMemcpy(sf_d,sf,27*sizeof(double),hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D fail"); // launch kernel dim3 grid(1,1,3); dim3 TpB(32,32,1); hipLaunchKernelGGL(( testLapSIPS), dim3(grid),dim3(TpB), 0, 0, sf_d,3,3,3,1.0,bx,by,bz); // copy data back to host hipMemcpy(sf,sf_d,27*sizeof(double),hipMemcpyDeviceToHost); 
cudaCheckErrors("hipMemcpy D2H fail"); // print out results for(size_t i=0;i<27;i++) /* cout << "i=" << i << " sf=" << sf[i] << " sol=" << solution[i] << endl; */ if( sf[i] != solution[i]) { cout << "i=" << i << " sf=" << sf[i] << " sol=" << solution[i] << endl; return false; } return true; }
8eefaa4d95ce084b191ba527d1c5f2c15ede5c8d.cu
# include <iostream> // endl # include <fstream> // for ofstream # include <string> // for string # include <sstream> // for stringstream # include <math.h> # include "PFSips.h" # include "PFSipsKernels.h" # include "../utils/cudaErrorMacros.h" // for cudaCheckErrors & cudaCheckAsyncErrors using std::string; using std::stringstream; using std::cout; using std::endl; using std::ofstream; // ------------------------------------------------------------------------- // Constructor: // ------------------------------------------------------------------------- PFSips::PFSips(const GetPot& input_params) : rng(1234) { // --------------------------------------- // Assign variables from 'input_params': // --------------------------------------- nx = input_params("Domain/nx",1); ny = input_params("Domain/ny",1); nz = input_params("Domain/nz",1); nxyz = nx*ny*nz; dx = input_params("Domain/dx",1.0); dy = input_params("Domain/dy",1.0); dz = input_params("Domain/dz",1.0); dt = input_params("Time/dt",1.0); bx = input_params("PFSips/bx",0); by = input_params("PFSips/by",1); bz = input_params("PFSips/bz",1); numSteps = input_params("Time/nstep",1); co = input_params("PFSips/co",0.20); M = input_params("PFSips/M",1.0); mobReSize = input_params("PFSips/mobReSize",0.35); kap = input_params("PFSips/kap",1.0); water_CB = input_params("PFSips/water_CB",1.0); NS_in_dope = input_params("PFSips/NS_in_dope",0.0); mobReSize = input_params("PFSips/mobReSize",0.35); chiPS = input_params("PFSips/chiPS",0.034); chiPN = input_params("PFSips/chiPN",1.5); phiCutoff = input_params("PFSips/phiCutoff",0.75); N = input_params("PFSips/N",100.0); A = input_params("PFSips/A",1.0); Tinit = input_params("PFSips/Tinit",298.0); Tcast = input_params("PFSips/Tcast",298.0); noiseStr = input_params("PFSips/noiseStr",0.1); D0 = input_params("PFSips/D0",1.0); Dw = input_params("PFSips/Dw",1.0); nu = input_params("PFSips/nu",1.0); nuDw = input_params("PFSips/nuDw",1.0); gamma = input_params("PFSips/gamma",1.0); gammaDw 
= input_params("PFSips/gammaDw",1.0); Mweight = input_params("PFSips/Mweight",100.0); Mvolume = input_params("PFSips/Mvolume",0.1); numOutputs = input_params("Output/numOutputs",1); outInterval = numSteps/numOutputs; // --------------------------------------- // Set up cuda kernel launch variables: // --------------------------------------- blockSize.x = input_params("GPU/blockSize.x",0); blockSize.y = input_params("GPU/blockSize.y",0); blockSize.z = input_params("GPU/blockSize.z",0); // set default kernel launch parameters if(blockSize.x == 0) blockSize.x = 32; if(blockSize.y == 0) blockSize.y = 32; if(blockSize.z == 0) blockSize.z = 1; // calculate the number of blocks to be used (3-D block grid) int totalBlockSize = blockSize.x*blockSize.y*blockSize.z; blocks.x = (nx + blockSize.x - 1)/blockSize.x; blocks.y = (ny + blockSize.y - 1)/blockSize.y; blocks.z = (nz + blockSize.z - 1)/blockSize.z; // perform some assumption checking int numBlocks = blocks.x*blocks.y*blocks.z; int totalNumThreads = numBlocks*totalBlockSize; if(totalNumThreads < nxyz) throw "GPU Kernel Launch setup lacks sufficient threads!\n"; if(totalBlockSize > 1024) throw "Total number of threads per block exceeds 1024"; } // ------------------------------------------------------------------------- // Destructor: // ------------------------------------------------------------------------- PFSips::~PFSips() { // ---------------------------------------- // free up device memory: // ---------------------------------------- cudaFree(c_d); cudaFree(df_d); cudaFree(Mob_d); cudaFree(w_d); cudaFree(muNS_d); cudaFree(nonUniformLap_d); cudaFree(cpyBuff_d); cudaFree(devState); } // ------------------------------------------------------------------------- // Initialize system: // ------------------------------------------------------------------------- void PFSips::initSystem() { // ---------------------------------------- // Initialize concentration fields: // ---------------------------------------- 
srand(time(NULL)); // setting the seed double r = 0.0; for(size_t i=0;i<nxyz;i++) { r = (double)rand()/RAND_MAX; // initialize polymer phase c.push_back(co + 0.1*(r-0.5)); // initialize nonsolvent phase water.push_back(NS_in_dope); } // ---------------------------------------- // Allocate memory on device and copy data // and copy data from host to device // ---------------------------------------- // allocate memory on device size = nxyz*sizeof(double); // allocate polymer species cudaMalloc((void**) &c_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate space for laplacian cudaMalloc((void**) &df_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate water concentration cudaMalloc((void**) &w_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate space for laplacian cudaMalloc((void**) &muNS_d,size); cudaCheckErrors("cudaMalloc fail"); // copy buffer cudaMalloc((void**) &cpyBuff_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate mobility cudaMalloc((void**) &Mob_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate nonuniform laplacian for mobility // and water diffusion coefficient cudaMalloc((void**) &nonUniformLap_d,size); cudaCheckErrors("cudaMalloc fail"); // allocate memory for cuRAND state cudaMalloc((void**) &devState,nxyz*sizeof(curandState)); cudaCheckErrors("cudaMalloc fail"); // copy concentration and water array to device cudaMemcpy(c_d,&c[0],size,cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D fail"); cudaMemcpy(w_d,&water[0],size,cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D fail"); // ---------------------------------------- // Initialize thermal fluctuations of // polymer concentration // ---------------------------------------- init_cuRAND<<<blocks,blockSize>>>(time(NULL),devState,nx,ny,nz); } // ------------------------------------------------------------------------- // Take one step forward in time: // ------------------------------------------------------------------------- void 
PFSips::computeInterval(int interval) { // ---------------------------------------- // Set the time step: // ---------------------------------------- current_step = interval*outInterval; // ---------------------------------------- // Evolve system by solving CH equation: // ---------------------------------------- for(size_t i=0;i<outInterval;i++) { // calculate the laplacian of c_d and store in df_d calculateLapBoundaries<<<blocks,blockSize>>>(c_d,df_d,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("calculateLap polymer kernel fail"); cudaDeviceSynchronize(); // calculate the chemical potential and store in df_d calculateChemPotFH<<<blocks,blockSize>>>(c_d,w_d,df_d,kap,A,chiPS,chiPN,N,nx,ny,nz,current_step,dt); cudaCheckAsyncErrors("calculateChemPotFH kernel fail"); cudaDeviceSynchronize(); // calculate mobility and store it in Mob_d calculateMobility<<<blocks,blockSize>>>(c_d,Mob_d,M,mobReSize,nx,ny,nz,phiCutoff,N,gamma,nu,D0,Mweight,Mvolume,Tcast); cudaCheckAsyncErrors("calculateMobility kernel fail"); cudaDeviceSynchronize(); // calculate the laplacian of the chemical potential, then update c_d // using an Euler update lapChemPotAndUpdateBoundaries<<<blocks,blockSize>>>(c_d,df_d,Mob_d,nonUniformLap_d, dt,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("lapChemPotAndUpdateBoundaries kernel fail"); cudaDeviceSynchronize(); // calculate mu for Nonsolvent diffusion calculate_muNS<<<blocks,blockSize>>>(w_d,c_d,muNS_d,Mob_d,Dw,water_CB,gammaDw,nuDw,Mweight,Mvolume,nx,ny,nz); cudaCheckAsyncErrors('calculate muNS kernel fail'); cudaDeviceSynchronize(); // calculate laplacian for diffusing water calculateLapBoundaries_muNS<<<blocks,blockSize>>>(df_d,muNS_d,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors('calculateLap water kernel fail'); cudaDeviceSynchronize(); // calculate nonuniform laplacian for diffusion calculateNonUniformLapBoundaries_muNS<<<blocks,blockSize>>>(muNS_d,Mob_d,nonUniformLap_d,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors('calculateNonUniformLap muNS kernel 
fail'); cudaDeviceSynchronize(); // euler update water diffusing update_water<<<blocks,blockSize>>>(w_d,df_d,Mob_d,nonUniformLap_d,dt,nx,ny,nz,dx,bx,by,bz); cudaCheckAsyncErrors("updateWater kernel fail"); cudaDeviceSynchronize(); // add thermal fluctuations of polymer concentration addNoise<<<blocks,blockSize>>>(c_d, nx, ny, nz, dt, current_step, water_CB, phiCutoff, devState); cudaCheckAsyncErrors("addNoise kernel fail"); cudaDeviceSynchronize(); } // ---------------------------------------- // Copy data back to host for writing: // ---------------------------------------- // polymer concentration populateCopyBufferSIPS<<<blocks,blockSize>>>(c_d,cpyBuff_d,nx,ny,nz); cudaMemcpyAsync(&c[0],c_d,size,cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpyAsync D2H fail"); cudaDeviceSynchronize(); // nonsolvent concentration populateCopyBufferSIPS<<<blocks,blockSize>>>(w_d,cpyBuff_d,nx,ny,nz); cudaMemcpyAsync(&water[0],w_d,size,cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpyAsync D2H fail"); cudaDeviceSynchronize(); } // ------------------------------------------------------------------------- // Write output: // ------------------------------------------------------------------------- void PFSips::writeOutput(int step) { // ----------------------------------- // Define the file location and name: // ----------------------------------- ofstream outfile; ofstream outfile2; stringstream filenamecombine; stringstream filenamecombine2; filenamecombine << "vtkoutput/c_" << step << ".vtk"; string filename = filenamecombine.str(); outfile.open(filename.c_str(), std::ios::out); // ----------------------------------- // Write the 'vtk' file header: // ----------------------------------- string d = " "; outfile << "# vtk DataFile Version 3.1" << endl; outfile << "VTK file containing grid data" << endl; outfile << "ASCII" << endl; outfile << " " << endl; outfile << "DATASET STRUCTURED_POINTS" << endl; outfile << "DIMENSIONS" << d << nx << d << ny << d << nz << endl; outfile << 
"ORIGIN " << d << 0 << d << 0 << d << 0 << endl; outfile << "SPACING" << d << 1.0 << d << 1.0 << d << 1.0 << endl; outfile << " " << endl; outfile << "POINT_DATA " << nxyz << endl; outfile << "SCALARS c float" << endl; outfile << "LOOKUP_TABLE default" << endl; // ----------------------------------- // Write the data: // NOTE: x-data increases fastest, // then y-data, then z-data // ----------------------------------- for(size_t k=0;k<nz;k++) for(size_t j=0;j<ny;j++) for(size_t i=0;i<nx;i++) { int id = nx*ny*k + nx*j + i; double point = c[id]; //if (point < 1e-10) point = 0.0; // making really small numbers == 0 outfile << point << endl; } // ----------------------------------- // Close the file: // ----------------------------------- outfile.close(); // vtkoutput for water // ----------------------------------- // Define the file location and name: // ----------------------------------- filenamecombine2 << "vtkoutput/w_" << step << ".vtk"; string filename2 = filenamecombine2.str(); outfile2.open(filename2.c_str(), std::ios::out); // ----------------------------------- // Write the 'vtk' file header: // ----------------------------------- outfile2 << "# vtk DataFile Version 3.1" << endl; outfile2 << "VTK file containing grid data" << endl; outfile2 << "ASCII" << endl; outfile2 << " " << endl; outfile2 << "DATASET STRUCTURED_POINTS" << endl; outfile2 << "DIMENSIONS" << d << nx << d << ny << d << nz << endl; outfile2 << "ORIGIN " << d << 0 << d << 0 << d << 0 << endl; outfile2 << "SPACING" << d << 1.0 << d << 1.0 << d << 1.0 << endl; outfile2 << " " << endl; outfile2 << "POINT_DATA " << nxyz << endl; outfile2 << "SCALARS w float" << endl; outfile2 << "LOOKUP_TABLE default" << endl; // ----------------------------------- // Write the data: // NOTE: x-data increases fastest, // then y-data, then z-data // ----------------------------------- for(size_t k=0;k<nz;k++) for(size_t j=0;j<ny;j++) for(size_t i=0;i<nx;i++) { int id = nx*ny*k + nx*j + i; double point = 
water[id]; // for paraview if (point < 1e-30) point = 0.0; // making really small numbers == 0 outfile2 << point << endl; } // ----------------------------------- // Close the file: // ----------------------------------- outfile2.close(); } // ------------------------------------------------------------------------- // Run unit tests for this App: // ------------------------------------------------------------------------- void PFSips::runUnitTests() { bool pass; pass = lapKernUnitTest(); if(pass) cout << "\t- lapKernUnitTest -------------- PASSED\n"; else cout << "\t- lapKernUnitTest -------------- FAILED\n"; } // ------------------------------------------------------------------------- // Unit tests for this App: // ------------------------------------------------------------------------- bool PFSips::lapKernUnitTest() { // 3X3X3 scalar field with ones except the central node double sf[27] = {1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1}; double solution[27] = {0,0,0,0,-1,0,0,0,0,0,-1,0,-1,6,-1,0,-1,0,0,0,0,0,-1,0,0,0,0}; // allocate space on device double* sf_d; cudaMalloc((void**) &sf_d,27*sizeof(double)); cudaCheckErrors("cudaMalloc fail"); // copy sf to device cudaMemcpy(sf_d,sf,27*sizeof(double),cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D fail"); // launch kernel dim3 grid(1,1,3); dim3 TpB(32,32,1); testLapSIPS<<<grid,TpB>>>(sf_d,3,3,3,1.0,bx,by,bz); // copy data back to host cudaMemcpy(sf,sf_d,27*sizeof(double),cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy D2H fail"); // print out results for(size_t i=0;i<27;i++) /* cout << "i=" << i << " sf=" << sf[i] << " sol=" << solution[i] << endl; */ if( sf[i] != solution[i]) { cout << "i=" << i << " sf=" << sf[i] << " sol=" << solution[i] << endl; return false; } return true; }
ea2dc4b97d9a3da940f256d4751e31d24f0ff2df.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); hipEvent_t start_time, stop_time; float exectime; hipEventCreate(&start_time); hipEventCreate(&stop_time); hipEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); hipEventRecord(stop_time, 0); hipEventSynchronize(stop_time); hipEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); hipEventDestroy(start_time); hipEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
ea2dc4b97d9a3da940f256d4751e31d24f0ff2df.cu
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <time.h> #ifndef Size #define Size 1000 #endif #define b 4 void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size]) { int i,j,k; for(i=0; i<Size; i++) for(j=0; j<Size; j++) for(k=0; k<Size; k++) C[i][j] += A[i][k]*B[k][j]; return; } void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]); int main(void) { int i, j, k; int size = sizeof(int) * Size * Size; int *aptr, *bptr, *cptr; int *host_A, *host_B, *host_C; srand(time(NULL)); host_A = (int *)malloc(size); host_B = (int *)malloc(size); host_C = (int *)malloc(size); aptr = host_A; bptr = host_B; cptr = host_C; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) { *aptr++ = rand() % 10; *bptr++ = rand() % 10; *cptr++ = 0; } int *gold_C; gold_C = (int *)malloc(size); metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C); cudaEvent_t start_time, stop_time; float exectime; cudaEventCreate(&start_time); cudaEventCreate(&stop_time); cudaEventRecord(start_time, 0); metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C); cudaEventRecord(stop_time, 0); cudaEventSynchronize(stop_time); cudaEventElapsedTime(&exectime, start_time, stop_time); printf("real %f ms\n ", exectime); cudaEventDestroy(start_time); cudaEventDestroy(stop_time); //check result if (!memcmp(host_C, gold_C, size)) printf("AC!\n"); else printf("Failed!\n"); /*k = 0; for (i = 0; i < Size; i++) for (j = 0; j < Size; j++) printf("host_C[%d][%d] = %d\n", i, j, host_C[k++]);*/ free(host_A); free(host_B); free(host_C); free(gold_C); return 0; }
2ec6d2d3784e19dd6dd7f204137812035f0d7efd.hip
// !!! This is a file automatically generated by hipify!!! #ifndef VIENNACL_LINALG_CUDA_SCALAR_OPERATIONS_HPP_ #define VIENNACL_LINALG_CUDA_SCALAR_OPERATIONS_HPP_ /* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp rupp@iue.tuwien.ac.at (A list of authors and contributors can be found in the manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** @file viennacl/linalg/cuda/scalar_operations.hpp @brief Implementations of scalar operations using CUDA */ #include "viennacl/forwards.h" #include "viennacl/tools/tools.hpp" #include "viennacl/meta/predicate.hpp" #include "viennacl/meta/enable_if.hpp" #include "viennacl/traits/size.hpp" #include "viennacl/traits/start.hpp" #include "viennacl/traits/stride.hpp" #include "viennacl/linalg/cuda/common.cu" // includes CUDA #include <hip/hip_runtime.h> namespace viennacl { namespace linalg { namespace cuda { /////////////////// as ///////////////////////////// template<typename NumericT> __global__ void as_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; *s1 = *s2 * alpha; } template<typename NumericT> __global__ void as_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; *s1 = *s2 * alpha; } template<typename ScalarT1, typename ScalarT2, typename NumericT> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && 
viennacl::is_scalar<ScalarT2>::value && viennacl::is_any_scalar<NumericT>::value >::type as(ScalarT1 & s1, ScalarT2 const & s2, NumericT const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT>::value) temporary_alpha = alpha; hipLaunchKernelGGL(( as_kernel), dim3(1), dim3(1), 0, 0, viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, temporary_alpha)), options_alpha, viennacl::cuda_arg(s2)); VIENNACL_CUDA_LAST_ERROR_CHECK("as_kernel"); } //////////////////// asbs //////////////////////////// // alpha and beta on GPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2, const NumericT * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha on CPU, beta on GPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT const * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha on GPU, beta on CPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT const * fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int 
options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha and beta on CPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } template<typename ScalarT1, typename ScalarT2, typename NumericT1, typename ScalarT3, typename NumericT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value && viennacl::is_scalar<ScalarT3>::value && viennacl::is_any_scalar<NumericT1>::value && viennacl::is_any_scalar<NumericT2>::value >::type asbs(ScalarT1 & s1, ScalarT2 const & s2, NumericT1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha, ScalarT3 const & s3, NumericT2 const & beta, vcl_size_t len_beta, bool reciprocal_beta, bool flip_sign_beta) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); unsigned int options_beta = detail::make_options(len_beta, reciprocal_beta, flip_sign_beta); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT1>::value) temporary_alpha = alpha; value_type temporary_beta = 0; if (viennacl::is_cpu_scalar<NumericT2>::value) temporary_beta = beta; hipLaunchKernelGGL(( asbs_kernel), dim3(1), dim3(1), 0, 0, viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, 
temporary_alpha)), options_alpha, viennacl::cuda_arg(s2), viennacl::cuda_arg<value_type>(detail::arg_reference(beta, temporary_beta)), options_beta, viennacl::cuda_arg(s3) ); VIENNACL_CUDA_LAST_ERROR_CHECK("asbs_kernel"); } //////////////////// asbs_s //////////////////// // alpha and beta on GPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2, const NumericT * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha on CPU, beta on GPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT const * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha on GPU, beta on CPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT const * fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha and beta on CPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT 
alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } template<typename ScalarT1, typename ScalarT2, typename NumericT1, typename ScalarT3, typename NumericT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value && viennacl::is_scalar<ScalarT3>::value && viennacl::is_any_scalar<NumericT1>::value && viennacl::is_any_scalar<NumericT2>::value >::type asbs_s(ScalarT1 & s1, ScalarT2 const & s2, NumericT1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha, ScalarT3 const & s3, NumericT2 const & beta, vcl_size_t len_beta, bool reciprocal_beta, bool flip_sign_beta) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); unsigned int options_beta = detail::make_options(len_beta, reciprocal_beta, flip_sign_beta); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT1>::value) temporary_alpha = alpha; value_type temporary_beta = 0; if (viennacl::is_cpu_scalar<NumericT2>::value) temporary_beta = beta; std::cout << "Launching asbs_s_kernel..." 
<< std::endl; hipLaunchKernelGGL(( asbs_s_kernel), dim3(1), dim3(1), 0, 0, viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, temporary_alpha)), options_alpha, viennacl::cuda_arg(s2), viennacl::cuda_arg<value_type>(detail::arg_reference(beta, temporary_beta)), options_beta, viennacl::cuda_arg(s3) ); VIENNACL_CUDA_LAST_ERROR_CHECK("asbs_s_kernel"); } ///////////////// swap ////////////////// template<typename NumericT> __global__ void scalar_swap_kernel(NumericT * s1, NumericT * s2) { NumericT tmp = *s2; *s2 = *s1; *s1 = tmp; } /** @brief Swaps the contents of two scalars, data is copied * * @param s1 The first scalar * @param s2 The second scalar */ template<typename ScalarT1, typename ScalarT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value >::type swap(ScalarT1 & s1, ScalarT2 & s2) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; hipLaunchKernelGGL(( scalar_swap_kernel), dim3(1), dim3(1), 0, 0, viennacl::cuda_arg(s1), viennacl::cuda_arg(s2)); } } //namespace single_threaded } //namespace linalg } //namespace viennacl #endif
2ec6d2d3784e19dd6dd7f204137812035f0d7efd.cu
#ifndef VIENNACL_LINALG_CUDA_SCALAR_OPERATIONS_HPP_ #define VIENNACL_LINALG_CUDA_SCALAR_OPERATIONS_HPP_ /* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp rupp@iue.tuwien.ac.at (A list of authors and contributors can be found in the manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** @file viennacl/linalg/cuda/scalar_operations.hpp @brief Implementations of scalar operations using CUDA */ #include "viennacl/forwards.h" #include "viennacl/tools/tools.hpp" #include "viennacl/meta/predicate.hpp" #include "viennacl/meta/enable_if.hpp" #include "viennacl/traits/size.hpp" #include "viennacl/traits/start.hpp" #include "viennacl/traits/stride.hpp" #include "viennacl/linalg/cuda/common.cu" // includes CUDA #include <cuda_runtime.h> namespace viennacl { namespace linalg { namespace cuda { /////////////////// as ///////////////////////////// template<typename NumericT> __global__ void as_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; *s1 = *s2 * alpha; } template<typename NumericT> __global__ void as_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; *s1 = *s2 * alpha; } template<typename ScalarT1, typename ScalarT2, typename NumericT> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value && 
viennacl::is_any_scalar<NumericT>::value >::type as(ScalarT1 & s1, ScalarT2 const & s2, NumericT const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT>::value) temporary_alpha = alpha; as_kernel<<<1, 1>>>(viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, temporary_alpha)), options_alpha, viennacl::cuda_arg(s2)); VIENNACL_CUDA_LAST_ERROR_CHECK("as_kernel"); } //////////////////// asbs //////////////////////////// // alpha and beta on GPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2, const NumericT * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha on CPU, beta on GPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT const * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha on GPU, beta on CPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT const * fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) 
alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } // alpha and beta on CPU template<typename NumericT> __global__ void asbs_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 = *s2 * alpha + *s3 * beta; } template<typename ScalarT1, typename ScalarT2, typename NumericT1, typename ScalarT3, typename NumericT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value && viennacl::is_scalar<ScalarT3>::value && viennacl::is_any_scalar<NumericT1>::value && viennacl::is_any_scalar<NumericT2>::value >::type asbs(ScalarT1 & s1, ScalarT2 const & s2, NumericT1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha, ScalarT3 const & s3, NumericT2 const & beta, vcl_size_t len_beta, bool reciprocal_beta, bool flip_sign_beta) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); unsigned int options_beta = detail::make_options(len_beta, reciprocal_beta, flip_sign_beta); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT1>::value) temporary_alpha = alpha; value_type temporary_beta = 0; if (viennacl::is_cpu_scalar<NumericT2>::value) temporary_beta = beta; asbs_kernel<<<1, 1>>>(viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, temporary_alpha)), options_alpha, viennacl::cuda_arg(s2), viennacl::cuda_arg<value_type>(detail::arg_reference(beta, 
temporary_beta)), options_beta, viennacl::cuda_arg(s3) ); VIENNACL_CUDA_LAST_ERROR_CHECK("asbs_kernel"); } //////////////////// asbs_s //////////////////// // alpha and beta on GPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, const NumericT * fac2, unsigned int options2, const NumericT * s2, const NumericT * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha on CPU, beta on GPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT const * fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = *fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha on GPU, beta on CPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT const * fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = *fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } // alpha and beta on CPU template<typename NumericT> __global__ void asbs_s_kernel(NumericT * s1, NumericT fac2, unsigned int options2, const NumericT * s2, NumericT fac3, unsigned int options3, const NumericT * s3) { NumericT alpha = fac2; if (options2 & (1 << 0)) alpha = -alpha; if (options2 & (1 << 1)) alpha = NumericT(1) / alpha; NumericT 
beta = fac3; if (options3 & (1 << 0)) beta = -beta; if (options3 & (1 << 1)) beta = NumericT(1) / beta; *s1 += *s2 * alpha + *s3 * beta; } template<typename ScalarT1, typename ScalarT2, typename NumericT1, typename ScalarT3, typename NumericT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value && viennacl::is_scalar<ScalarT3>::value && viennacl::is_any_scalar<NumericT1>::value && viennacl::is_any_scalar<NumericT2>::value >::type asbs_s(ScalarT1 & s1, ScalarT2 const & s2, NumericT1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha, ScalarT3 const & s3, NumericT2 const & beta, vcl_size_t len_beta, bool reciprocal_beta, bool flip_sign_beta) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; unsigned int options_alpha = detail::make_options(len_alpha, reciprocal_alpha, flip_sign_alpha); unsigned int options_beta = detail::make_options(len_beta, reciprocal_beta, flip_sign_beta); value_type temporary_alpha = 0; if (viennacl::is_cpu_scalar<NumericT1>::value) temporary_alpha = alpha; value_type temporary_beta = 0; if (viennacl::is_cpu_scalar<NumericT2>::value) temporary_beta = beta; std::cout << "Launching asbs_s_kernel..." 
<< std::endl; asbs_s_kernel<<<1, 1>>>(viennacl::cuda_arg(s1), viennacl::cuda_arg<value_type>(detail::arg_reference(alpha, temporary_alpha)), options_alpha, viennacl::cuda_arg(s2), viennacl::cuda_arg<value_type>(detail::arg_reference(beta, temporary_beta)), options_beta, viennacl::cuda_arg(s3) ); VIENNACL_CUDA_LAST_ERROR_CHECK("asbs_s_kernel"); } ///////////////// swap ////////////////// template<typename NumericT> __global__ void scalar_swap_kernel(NumericT * s1, NumericT * s2) { NumericT tmp = *s2; *s2 = *s1; *s1 = tmp; } /** @brief Swaps the contents of two scalars, data is copied * * @param s1 The first scalar * @param s2 The second scalar */ template<typename ScalarT1, typename ScalarT2> typename viennacl::enable_if< viennacl::is_scalar<ScalarT1>::value && viennacl::is_scalar<ScalarT2>::value >::type swap(ScalarT1 & s1, ScalarT2 & s2) { typedef typename viennacl::result_of::cpu_value_type<ScalarT1>::type value_type; scalar_swap_kernel<<<1, 1>>>(viennacl::cuda_arg(s1), viennacl::cuda_arg(s2)); } } //namespace single_threaded } //namespace linalg } //namespace viennacl #endif
2ea5b599f98e0dc1538f40eb2d88a4d34d366c15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void coalesced2(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*2; if (i+1 < N) { C[i] = A[i]; C[i+1] = A[i+1];} }
2ea5b599f98e0dc1538f40eb2d88a4d34d366c15.cu
#include "includes.h" __global__ void coalesced2(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*2; if (i+1 < N) { C[i] = A[i]; C[i+1] = A[i+1];} }
43baac580b429356cea0c44545da6eb9340f02a7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> void f(double *res, double *vec1, double *vec2, int n) { int i = 0; for(i = 0; i < n; i++) res[i] = vec1[i] + vec2[i]; } int main() { int i, n; scanf("%d", &n); double *res = (double *)malloc(sizeof(double) * n); double *vec1 = (double *)malloc(sizeof(double) * n); double *vec2 = (double *)malloc(sizeof(double) * n); for(i = 0; i < n; i++) scanf("%lf", &vec1[i]); for(i = 0; i < n; i++) scanf("%lf", &vec2[i]); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); f(res, vec1, vec2, n); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); fprintf(stderr, "time = %f\n", time); hipEventDestroy(stop); hipEventDestroy(start); // for(i = 0; i < n; i++) // printf("%f ", res[i]); // printf("\n"); free(res); free(vec1); free(vec2); return 0; }
43baac580b429356cea0c44545da6eb9340f02a7.cu
#include <stdio.h> #include <stdlib.h> void f(double *res, double *vec1, double *vec2, int n) { int i = 0; for(i = 0; i < n; i++) res[i] = vec1[i] + vec2[i]; } int main() { int i, n; scanf("%d", &n); double *res = (double *)malloc(sizeof(double) * n); double *vec1 = (double *)malloc(sizeof(double) * n); double *vec2 = (double *)malloc(sizeof(double) * n); for(i = 0; i < n; i++) scanf("%lf", &vec1[i]); for(i = 0; i < n; i++) scanf("%lf", &vec2[i]); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); f(res, vec1, vec2, n); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); fprintf(stderr, "time = %f\n", time); cudaEventDestroy(stop); cudaEventDestroy(start); // for(i = 0; i < n; i++) // printf("%f ", res[i]); // printf("\n"); free(res); free(vec1); free(vec2); return 0; }
1dd465703510c3da3ca81da03b26c56ac9adcf67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020 Savely Pototsky (SavaLione) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Based on: */ /****************************************************************************** * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * * Code and text by Sean Baxter, NVIDIA Research * See http://nvlabs.github.io/moderngpu for repository and documentation. 
* ******************************************************************************/ #include <newmoderngpu/moderngpu.cuh> using namespace mgpu; template<int NT, bool HasValues, typename InputIt1, typename InputIt2, typename AValsIt, typename OutputIt, typename ValsIt, typename Comp> __global__ void ParallelMergeA(InputIt1 a_global, AValsIt aVals_global, int aCount, InputIt2 b_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp) { typedef typename std::iterator_traits<InputIt1>::value_type T; int gid = threadIdx.x + NT * blockIdx.x; if(gid < aCount) { T aKey = a_global[gid]; int lb = BinarySearch<MgpuBoundsLower>(b_global, bCount, aKey, comp); dest_global[gid + lb] = aKey; if(HasValues) vals_global[gid + lb] = aVals_global[gid]; } } template<int NT, bool HasValues, typename InputIt1, typename InputIt2, typename BValsIt, typename OutputIt, typename ValsIt, typename Comp> __global__ void ParallelMergeB(InputIt1 a_global, int aCount, InputIt2 b_global, BValsIt bVals_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp) { typedef typename std::iterator_traits<InputIt2>::value_type T; int gid = threadIdx.x + NT * blockIdx.x; if(gid < bCount) { T bKey = b_global[gid]; int ub = BinarySearch<MgpuBoundsUpper>(a_global, aCount, bKey, comp); dest_global[gid + ub] = bKey; if(HasValues) vals_global[gid + ub] = bVals_global[gid]; } } template<typename InputIt1, typename InputIt2, typename OutputIt, typename Comp> void ParallelMergeKeys(InputIt1 a_global, int aCount, InputIt2 b_global, int bCount, OutputIt dest_global, Comp comp, CudaContext& context) { // NOTE: With NT = 512, limit of 33553920 in either array for Fermi arch. 
const int NT = 512; int aBlocks = MGPU_DIV_UP(aCount, NT); int bBlocks = MGPU_DIV_UP(bCount, NT); hipLaunchKernelGGL(( ParallelMergeA<NT, false>), dim3(aBlocks), dim3(NT), 0, 0, a_global, (const int*)0, aCount, b_global, bCount, dest_global, (int*)0, comp); hipLaunchKernelGGL(( ParallelMergeB<NT, false>), dim3(bBlocks), dim3(NT), 0, 0, a_global, aCount, b_global, (const int*)0, bCount, dest_global, (int*)0, comp); } template<typename InputIt1, typename InputIt2, typename AValsIt, typename BValsIt, typename OutputIt, typename ValsIt, typename Comp> void ParallelMergePairs(InputIt1 a_global, AValsIt aVals_global, int aCount, InputIt2 b_global, BValsIt bVals_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp, CudaContext& context) { // NOTE: With NT = 512, limit of 33553920 in either array for Fermi arch. const int NT = 512; int aBlocks = MGPU_DIV_UP(aCount, NT); int bBlocks = MGPU_DIV_UP(bCount, NT); hipLaunchKernelGGL(( ParallelMergeA<NT, true>), dim3(aBlocks), dim3(NT), 0, 0, a_global, aVals_global, aCount, b_global, bCount, dest_global, vals_global, comp); hipLaunchKernelGGL(( ParallelMergeB<NT, true>), dim3(bBlocks), dim3(NT), 0, 0, a_global, aCount, b_global, bVals_global, bCount, dest_global, vals_global, comp); } //////////////////////////////////////////////////////////////////////////////// // Benchmark template<typename T> void BenchmarkMergeKeysNaive(int count, int numIt, CudaContext& context) { int aCount = count / 2; int bCount = count - aCount; MGPU_MEM(T) a = context.SortRandom<T>(aCount, 0, (T)count); MGPU_MEM(T) b = context.SortRandom<T>(bCount, 0, (T)count); MGPU_MEM(T) c = context.Malloc<T>(count); std::vector<T> aHost, bHost; a->ToHost(aHost); b->ToHost(bHost); std::vector<T> cHost(count); // Benchmark MGPU context.Start(); for(int it = 0; it < numIt; ++it) ParallelMergeKeys(a->get(), aCount, b->get(), bCount, c->get(), mgpu::less<T>(), context); double naiveElapsed = context.Split(); // Verify std::merge(aHost.begin(), 
aHost.end(), bHost.begin(), bHost.end(), cHost.begin()); // Compare naive to STL. std::vector<T> cHost2; c->ToHost(cHost2); for(int i = 0; i < count; ++i) if(cHost[i] != cHost2[i]) { printf("MERGE ERROR AT COUNT = %d ITEM = %d\n", count, i); exit(0); } double bytes = 2 * sizeof(T) * count; double naiveThroughput = count * numIt / naiveElapsed; double naiveBandwidth = bytes * numIt / naiveElapsed; printf("%s: %9.3lf M/s %7.3lf GB/s \n", FormatInteger(count).c_str(), naiveThroughput / 1.0e6, naiveBandwidth / 1.0e9); } template<typename T> void BenchmarkMergePairsNaive(int count, int numIt, CudaContext& context) { int aCount = count / 2; int bCount = count - aCount; MGPU_MEM(T) a = context.SortRandom<T>(aCount, 0, (T)count); MGPU_MEM(T) b = context.SortRandom<T>(bCount, 0, (T)count); MGPU_MEM(T) aVals = context.FillAscending<T>(aCount, 0, 1); MGPU_MEM(T) bVals = context.FillAscending<T>(bCount, aCount, 1); MGPU_MEM(T) c = context.Malloc<T>(count); MGPU_MEM(T) cVals = context.Malloc<T>(count); std::vector<T> aHost, bHost; a->ToHost(aHost); b->ToHost(bHost); std::vector<T> cHost(count); // Benchmark MGPU context.Start(); for(int it = 0; it < numIt; ++it) ParallelMergePairs(a->get(), aVals->get(), aCount, b->get(), bVals->get(), bCount, c->get(), cVals->get(), mgpu::less<T>(), context); double naiveElapsed = context.Split(); // Verify std::merge(aHost.begin(), aHost.end(), bHost.begin(), bHost.end(), cHost.begin()); // Compare naive to STL. 
std::vector<T> cHost2; c->ToHost(cHost2); for(int i = 0; i < count; ++i) if(cHost[i] != cHost2[i]) { printf("MERGE ERROR AT COUNT = %d ITEM = %d\n", count, i); exit(0); } double bytes = 4 * sizeof(T) * count; double naiveThroughput = count * numIt / naiveElapsed; double naiveBandwidth = bytes * numIt / naiveElapsed; printf("%s: %9.3lf M/s %7.3lf GB/s \n", FormatInteger(count).c_str(), naiveThroughput / 1.0e6, naiveBandwidth / 1.0e9); } const int Tests[][2] = { { 10000, 1000 }, { 50000, 1000 }, { 100000, 1000 }, { 200000, 500 }, { 500000, 200 }, { 1000000, 200 }, { 2000000, 200 }, { 5000000, 200 }, { 10000000, 100 }, { 20000000, 100 } }; const int NumTests = sizeof(Tests) / sizeof(*Tests); int main(int argc, char** argv) { ContextPtr context = CreateCudaDevice(argc, argv, true); typedef int T1; typedef int64 T2; printf("Benchmarking merge-keys on type %s.\n", TypeIdName<T1>()); for(int test = 0; test < NumTests; ++test) BenchmarkMergeKeysNaive<T1>(Tests[test][0], Tests[test][1], *context); printf("Benchmarking merge-pairs on type %s.\n", TypeIdName<T1>()); for(int test = 0; test < NumTests; ++test) BenchmarkMergePairsNaive<T1>(Tests[test][0], Tests[test][1], *context); // printf("\nBenchmarking merge-keys on type %s.\n", TypeIdName<T2>()); // for(int test = 0; test < NumTests; ++test) // BenchmarkMergeKeysNaive<T2>(Tests[test][0], Tests[test][1], *context); return 0; }
1dd465703510c3da3ca81da03b26c56ac9adcf67.cu
/* * Copyright (c) 2020 Savely Pototsky (SavaLione) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Based on: */ /****************************************************************************** * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * * Code and text by Sean Baxter, NVIDIA Research * See http://nvlabs.github.io/moderngpu for repository and documentation. 
* ******************************************************************************/ #include <newmoderngpu/moderngpu.cuh> using namespace mgpu; template<int NT, bool HasValues, typename InputIt1, typename InputIt2, typename AValsIt, typename OutputIt, typename ValsIt, typename Comp> __global__ void ParallelMergeA(InputIt1 a_global, AValsIt aVals_global, int aCount, InputIt2 b_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp) { typedef typename std::iterator_traits<InputIt1>::value_type T; int gid = threadIdx.x + NT * blockIdx.x; if(gid < aCount) { T aKey = a_global[gid]; int lb = BinarySearch<MgpuBoundsLower>(b_global, bCount, aKey, comp); dest_global[gid + lb] = aKey; if(HasValues) vals_global[gid + lb] = aVals_global[gid]; } } template<int NT, bool HasValues, typename InputIt1, typename InputIt2, typename BValsIt, typename OutputIt, typename ValsIt, typename Comp> __global__ void ParallelMergeB(InputIt1 a_global, int aCount, InputIt2 b_global, BValsIt bVals_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp) { typedef typename std::iterator_traits<InputIt2>::value_type T; int gid = threadIdx.x + NT * blockIdx.x; if(gid < bCount) { T bKey = b_global[gid]; int ub = BinarySearch<MgpuBoundsUpper>(a_global, aCount, bKey, comp); dest_global[gid + ub] = bKey; if(HasValues) vals_global[gid + ub] = bVals_global[gid]; } } template<typename InputIt1, typename InputIt2, typename OutputIt, typename Comp> void ParallelMergeKeys(InputIt1 a_global, int aCount, InputIt2 b_global, int bCount, OutputIt dest_global, Comp comp, CudaContext& context) { // NOTE: With NT = 512, limit of 33553920 in either array for Fermi arch. 
const int NT = 512; int aBlocks = MGPU_DIV_UP(aCount, NT); int bBlocks = MGPU_DIV_UP(bCount, NT); ParallelMergeA<NT, false><<<aBlocks, NT>>>(a_global, (const int*)0, aCount, b_global, bCount, dest_global, (int*)0, comp); ParallelMergeB<NT, false><<<bBlocks, NT>>>(a_global, aCount, b_global, (const int*)0, bCount, dest_global, (int*)0, comp); } template<typename InputIt1, typename InputIt2, typename AValsIt, typename BValsIt, typename OutputIt, typename ValsIt, typename Comp> void ParallelMergePairs(InputIt1 a_global, AValsIt aVals_global, int aCount, InputIt2 b_global, BValsIt bVals_global, int bCount, OutputIt dest_global, ValsIt vals_global, Comp comp, CudaContext& context) { // NOTE: With NT = 512, limit of 33553920 in either array for Fermi arch. const int NT = 512; int aBlocks = MGPU_DIV_UP(aCount, NT); int bBlocks = MGPU_DIV_UP(bCount, NT); ParallelMergeA<NT, true><<<aBlocks, NT>>>(a_global, aVals_global, aCount, b_global, bCount, dest_global, vals_global, comp); ParallelMergeB<NT, true><<<bBlocks, NT>>>(a_global, aCount, b_global, bVals_global, bCount, dest_global, vals_global, comp); } //////////////////////////////////////////////////////////////////////////////// // Benchmark template<typename T> void BenchmarkMergeKeysNaive(int count, int numIt, CudaContext& context) { int aCount = count / 2; int bCount = count - aCount; MGPU_MEM(T) a = context.SortRandom<T>(aCount, 0, (T)count); MGPU_MEM(T) b = context.SortRandom<T>(bCount, 0, (T)count); MGPU_MEM(T) c = context.Malloc<T>(count); std::vector<T> aHost, bHost; a->ToHost(aHost); b->ToHost(bHost); std::vector<T> cHost(count); // Benchmark MGPU context.Start(); for(int it = 0; it < numIt; ++it) ParallelMergeKeys(a->get(), aCount, b->get(), bCount, c->get(), mgpu::less<T>(), context); double naiveElapsed = context.Split(); // Verify std::merge(aHost.begin(), aHost.end(), bHost.begin(), bHost.end(), cHost.begin()); // Compare naive to STL. 
std::vector<T> cHost2; c->ToHost(cHost2); for(int i = 0; i < count; ++i) if(cHost[i] != cHost2[i]) { printf("MERGE ERROR AT COUNT = %d ITEM = %d\n", count, i); exit(0); } double bytes = 2 * sizeof(T) * count; double naiveThroughput = count * numIt / naiveElapsed; double naiveBandwidth = bytes * numIt / naiveElapsed; printf("%s: %9.3lf M/s %7.3lf GB/s \n", FormatInteger(count).c_str(), naiveThroughput / 1.0e6, naiveBandwidth / 1.0e9); } template<typename T> void BenchmarkMergePairsNaive(int count, int numIt, CudaContext& context) { int aCount = count / 2; int bCount = count - aCount; MGPU_MEM(T) a = context.SortRandom<T>(aCount, 0, (T)count); MGPU_MEM(T) b = context.SortRandom<T>(bCount, 0, (T)count); MGPU_MEM(T) aVals = context.FillAscending<T>(aCount, 0, 1); MGPU_MEM(T) bVals = context.FillAscending<T>(bCount, aCount, 1); MGPU_MEM(T) c = context.Malloc<T>(count); MGPU_MEM(T) cVals = context.Malloc<T>(count); std::vector<T> aHost, bHost; a->ToHost(aHost); b->ToHost(bHost); std::vector<T> cHost(count); // Benchmark MGPU context.Start(); for(int it = 0; it < numIt; ++it) ParallelMergePairs(a->get(), aVals->get(), aCount, b->get(), bVals->get(), bCount, c->get(), cVals->get(), mgpu::less<T>(), context); double naiveElapsed = context.Split(); // Verify std::merge(aHost.begin(), aHost.end(), bHost.begin(), bHost.end(), cHost.begin()); // Compare naive to STL. 
std::vector<T> cHost2; c->ToHost(cHost2); for(int i = 0; i < count; ++i) if(cHost[i] != cHost2[i]) { printf("MERGE ERROR AT COUNT = %d ITEM = %d\n", count, i); exit(0); } double bytes = 4 * sizeof(T) * count; double naiveThroughput = count * numIt / naiveElapsed; double naiveBandwidth = bytes * numIt / naiveElapsed; printf("%s: %9.3lf M/s %7.3lf GB/s \n", FormatInteger(count).c_str(), naiveThroughput / 1.0e6, naiveBandwidth / 1.0e9); } const int Tests[][2] = { { 10000, 1000 }, { 50000, 1000 }, { 100000, 1000 }, { 200000, 500 }, { 500000, 200 }, { 1000000, 200 }, { 2000000, 200 }, { 5000000, 200 }, { 10000000, 100 }, { 20000000, 100 } }; const int NumTests = sizeof(Tests) / sizeof(*Tests); int main(int argc, char** argv) { ContextPtr context = CreateCudaDevice(argc, argv, true); typedef int T1; typedef int64 T2; printf("Benchmarking merge-keys on type %s.\n", TypeIdName<T1>()); for(int test = 0; test < NumTests; ++test) BenchmarkMergeKeysNaive<T1>(Tests[test][0], Tests[test][1], *context); printf("Benchmarking merge-pairs on type %s.\n", TypeIdName<T1>()); for(int test = 0; test < NumTests; ++test) BenchmarkMergePairsNaive<T1>(Tests[test][0], Tests[test][1], *context); // printf("\nBenchmarking merge-keys on type %s.\n", TypeIdName<T2>()); // for(int test = 0; test < NumTests; ++test) // BenchmarkMergeKeysNaive<T2>(Tests[test][0], Tests[test][1], *context); return 0; }
211402d5188eee512ac28ba60ed7e955d439e798.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "utils.h" #include "pcg/pcg_basic.h" #include "ising.h" //initializing array with all spins 1 int8_t *initial_array(unsigned long n) { int8_t *array = malloc_int_h(n); for (unsigned long i = 0; i<n*n*n; i++) { array[i] = 1; } return array; } //initializing the array with random spin configuration int8_t *initial_array_random(unsigned long n, pcg32_random_t *rng) { int8_t *array = malloc_int_h(n); for (unsigned long i = 0; i<n*n*n; i++) { if (rand_f(rng, 0) > 0.5) { array[i] = 1; } else { array[i] = -1; } } return array; } //Energy of a single spin __device__ float E(int i, int j, int k, int8_t *array, int n, float J) { int up,down,left,right,front,back; //neighbour spins //Checking the periodic boundary conditions if (i == n-1) { down = array[index(0,j,k,n)]; } else { down = array[index(i+1,j,k,n)]; } if (j == n-1) { right = array[index(i,0,k,n)]; } else { right = array[index(i,j+1,k,n)]; } if (k == n-1) { back = array[index(i,j,0,n)]; } else { back = array[index(i,j,k+1,n)]; } if (i == 0) { up = array[index(n-1,j,k,n)]; } else { up = array[index(i-1,j,k,n)]; } if (j == 0) { left = array[index(i,n-1,k,n)]; } else { left = array[index(i,j-1,k,n)]; } if (k == 0) { front = array[index(i,j,n-1,n)]; } else { front = array[index(i,j,k-1,n)]; } return -1*J*array[index(i,j,k,n)]*(left+right+up+down+front+back); } //Total energy of the lattice __global__ void E_total(int8_t *array, int n, float J, float *E_result_d) { int x,y,z; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x; float e = E(x,y,z, array, n, J); __shared__ float blockSum; if (z == 0) blockSum = 0; __syncthreads(); atomicAdd(&blockSum, e); __syncthreads(); if (z == 0) atomicAdd(E_result_d, blockSum); } __global__ void M_total(int8_t *array, int n, float *M_result_d) { int x,y,z, idx; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x; idx = 
index(x,y,z, n); float m = array[idx]; __shared__ float blockSum; if (z == 0) blockSum = 0; __syncthreads(); atomicAdd(&blockSum, m); __syncthreads(); if (z == 0) atomicAdd(M_result_d, blockSum); } /*Returns an array of all the Boltzmann factors corresponding to different energies E<0. The factors are calculated beforehand to avoid calling exponential function on every iteration of the main loop*/ float *boltzmann_factors(float T, float J) { float *bf = malloc_float_h(6); //Loop over different energies E=J*i < 0 if (J > 0) { for (int i = -6; i<0; i++) { bf[-1*(i+1)] = exp(2*J*i/T); } } else { for (int i = 6; i>0; i--) { bf[i-1] = exp(2*J*i/T); } } return bf; } /*Flipping a single spin according to the Metropolis algorithm Returns 1 of spin is flipped and 0 if not*/ __global__ void update_spins( int n, int8_t *array, float *b_factors, float J, int which, pcg32_random_t *rng ) { int x, y, z, rng_idx; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x*2 + (x + y + which) % 2; if (x >= n || y >= n || z >= n) { return; } rng_idx = index(x, y, z, n) / 2; float En = E(x, y, z, array, n, J); int b_idx = fabs(En/J)+0.1; //+0.1 to negotiate floating point inaccuracy float b_factor; if (b_idx > 0) { //to not call b_factors[-1] b_factor = b_factors[b_idx-1]; } else { b_factor = 0; } if (En > 0 || rand_f(rng, rng_idx) < b_factor) { array[index(x,y,z,n)] *= -1; } } // A single simulation float *run_simulation( int n, int mc_steps, int trans_steps, int calc, float T, float J, pcg32_random_t *rng_h, pcg32_random_t *rng_d ) { /*This function initializes the spin array and runs the simulation using Metropolis algorithm to find the minimum of the free energy. First transient mc steps are run after which the system is assumed to be at equilibrium with the heat bath. After that the mc steps are run and the means of the needed physical quantities are calculated and returned. Notice: Seed needs to be given for the PRNG before calling this function. 
Seed should be given only once and can be done by calling the function seed() of utils.h. Parameters: n = Cubic root of the number of the spin array elements. i.e. lenght of an edge mc_steps = Number of single spin flips = mc_steps * n^3 trans_steps = Number of mc_steps before starting to calculate the means. The system needs to be near the equilibrium after trans_steps are run. T = The temperature of the system J = The coupling constant of the spin interaction */ dim3 grid_size(n, n, 1); dim3 block_half((int) ceil(n/2.0), 1, 1); dim3 block_whole(n, 1, 1); // Initializing spin array on host and copying it to the device int8_t *spins_h, *spins_d; spins_h = initial_array(n); malloc_int_d(&spins_d, n); hipMemcpy(spins_d, spins_h, n*n*n, hipMemcpyHostToDevice); // Precalculated boltzmann_factors float *b_factors_h, *b_factors_d; b_factors_h = boltzmann_factors(T,J); malloc_float_d(&b_factors_d, 6); hipMemcpy(b_factors_d, b_factors_h, 6*sizeof(float), hipMemcpyHostToDevice); int which = 0, samples = 0; //Transient mc steps for (int i = 0; i < trans_steps*2; i++) { // Change which squares to update every iteration which = (which + 1) % 2; // Update half of the spins hipLaunchKernelGGL(( update_spins), dim3(grid_size), dim3(block_half), 0, 0, n, spins_d, b_factors_d, J, which, rng_d ); } float *E_h, *E_d, *M_h, *M_d; E_h = malloc_float_h(1); M_h = malloc_float_h(1); malloc_float_d(&E_d, 1); malloc_float_d(&M_d, 1); // Double precision used to avoid cumulating error from float summation double E_tot = 0, Mabs_tot = 0, E2_tot = 0, M2_tot = 0, M4_tot = 0; //Main loop where the averages are calculated for (int mc = 0; mc < mc_steps; mc++) { // Updating all spins for (int i = 0; i<2; i++) { // Change which squares to update every iteration which = (which + 1) % 2; hipLaunchKernelGGL(( update_spins), dim3(grid_size), dim3(block_half), 0, 0, n, spins_d, b_factors_d, J, which, rng_d ); } // Calculating the physical quantities every calc mc steps if (mc % calc == 0) { // 
Calculating the current energy and magnetization *E_h = 0; *M_h = 0; hipMemcpy(E_d, E_h, sizeof(float), hipMemcpyHostToDevice); hipMemcpy(M_d, M_h, sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( E_total), dim3(grid_size), dim3(block_whole), 0, 0, spins_d, n, J, E_d); hipLaunchKernelGGL(( M_total), dim3(grid_size), dim3(block_whole), 0, 0, spins_d, n, M_d); hipMemcpy(M_h, M_d, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(E_h, E_d, sizeof(float), hipMemcpyDeviceToHost); // Updating all the physical quantities on the host E_tot += *E_h/3; E2_tot += pow(*E_h/3,2); Mabs_tot += abs(*M_h); M2_tot += pow(*M_h, 2); M4_tot += pow(*M_h, 4); samples++; } } float norm = 1.0/(n*n*n*samples); float *ret = malloc_float_h(9); ret[0] = E_tot*norm; //mean energy ret[1] = E2_tot*norm; //mean energy^2 ret[2] = Mabs_tot*norm; //mean |magnetization| ret[3] = M2_tot*norm; //mean magnetization^2 ret[4] = M4_tot*norm; //mean magnetization^4 ret[5] = (ret[1]-(ret[0]*ret[0]*n*n*n))/T; //heat capacity ret[6] = (ret[3]-(ret[2]*ret[2]*n*n*n))/T; //magnetic susceptibility ret[7] = 1-(ret[4]/(3*ret[3]*ret[3]*n*n*n)); //binder cumulant ret[8] = T; //temperature free(spins_h); free(b_factors_h); free(E_h); free(M_h); hipFree(spins_d); hipFree(b_factors_d); hipFree(E_d); hipFree(M_d); return ret; }
211402d5188eee512ac28ba60ed7e955d439e798.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "utils.h" #include "pcg/pcg_basic.h" #include "ising.h" //initializing array with all spins 1 int8_t *initial_array(unsigned long n) { int8_t *array = malloc_int_h(n); for (unsigned long i = 0; i<n*n*n; i++) { array[i] = 1; } return array; } //initializing the array with random spin configuration int8_t *initial_array_random(unsigned long n, pcg32_random_t *rng) { int8_t *array = malloc_int_h(n); for (unsigned long i = 0; i<n*n*n; i++) { if (rand_f(rng, 0) > 0.5) { array[i] = 1; } else { array[i] = -1; } } return array; } //Energy of a single spin __device__ float E(int i, int j, int k, int8_t *array, int n, float J) { int up,down,left,right,front,back; //neighbour spins //Checking the periodic boundary conditions if (i == n-1) { down = array[index(0,j,k,n)]; } else { down = array[index(i+1,j,k,n)]; } if (j == n-1) { right = array[index(i,0,k,n)]; } else { right = array[index(i,j+1,k,n)]; } if (k == n-1) { back = array[index(i,j,0,n)]; } else { back = array[index(i,j,k+1,n)]; } if (i == 0) { up = array[index(n-1,j,k,n)]; } else { up = array[index(i-1,j,k,n)]; } if (j == 0) { left = array[index(i,n-1,k,n)]; } else { left = array[index(i,j-1,k,n)]; } if (k == 0) { front = array[index(i,j,n-1,n)]; } else { front = array[index(i,j,k-1,n)]; } return -1*J*array[index(i,j,k,n)]*(left+right+up+down+front+back); } //Total energy of the lattice __global__ void E_total(int8_t *array, int n, float J, float *E_result_d) { int x,y,z; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x; float e = E(x,y,z, array, n, J); __shared__ float blockSum; if (z == 0) blockSum = 0; __syncthreads(); atomicAdd(&blockSum, e); __syncthreads(); if (z == 0) atomicAdd(E_result_d, blockSum); } __global__ void M_total(int8_t *array, int n, float *M_result_d) { int x,y,z, idx; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x; idx = index(x,y,z, n); float m = array[idx]; __shared__ float blockSum; if (z == 0) blockSum = 
0; __syncthreads(); atomicAdd(&blockSum, m); __syncthreads(); if (z == 0) atomicAdd(M_result_d, blockSum); } /*Returns an array of all the Boltzmann factors corresponding to different energies E<0. The factors are calculated beforehand to avoid calling exponential function on every iteration of the main loop*/ float *boltzmann_factors(float T, float J) { float *bf = malloc_float_h(6); //Loop over different energies E=J*i < 0 if (J > 0) { for (int i = -6; i<0; i++) { bf[-1*(i+1)] = exp(2*J*i/T); } } else { for (int i = 6; i>0; i--) { bf[i-1] = exp(2*J*i/T); } } return bf; } /*Flipping a single spin according to the Metropolis algorithm Returns 1 of spin is flipped and 0 if not*/ __global__ void update_spins( int n, int8_t *array, float *b_factors, float J, int which, pcg32_random_t *rng ) { int x, y, z, rng_idx; x = blockIdx.x; y = blockIdx.y; z = threadIdx.x*2 + (x + y + which) % 2; if (x >= n || y >= n || z >= n) { return; } rng_idx = index(x, y, z, n) / 2; float En = E(x, y, z, array, n, J); int b_idx = fabs(En/J)+0.1; //+0.1 to negotiate floating point inaccuracy float b_factor; if (b_idx > 0) { //to not call b_factors[-1] b_factor = b_factors[b_idx-1]; } else { b_factor = 0; } if (En > 0 || rand_f(rng, rng_idx) < b_factor) { array[index(x,y,z,n)] *= -1; } } // A single simulation float *run_simulation( int n, int mc_steps, int trans_steps, int calc, float T, float J, pcg32_random_t *rng_h, pcg32_random_t *rng_d ) { /*This function initializes the spin array and runs the simulation using Metropolis algorithm to find the minimum of the free energy. First transient mc steps are run after which the system is assumed to be at equilibrium with the heat bath. After that the mc steps are run and the means of the needed physical quantities are calculated and returned. Notice: Seed needs to be given for the PRNG before calling this function. Seed should be given only once and can be done by calling the function seed() of utils.h. 
Parameters: n = Cubic root of the number of the spin array elements. i.e. lenght of an edge mc_steps = Number of single spin flips = mc_steps * n^3 trans_steps = Number of mc_steps before starting to calculate the means. The system needs to be near the equilibrium after trans_steps are run. T = The temperature of the system J = The coupling constant of the spin interaction */ dim3 grid_size(n, n, 1); dim3 block_half((int) ceil(n/2.0), 1, 1); dim3 block_whole(n, 1, 1); // Initializing spin array on host and copying it to the device int8_t *spins_h, *spins_d; spins_h = initial_array(n); malloc_int_d(&spins_d, n); cudaMemcpy(spins_d, spins_h, n*n*n, cudaMemcpyHostToDevice); // Precalculated boltzmann_factors float *b_factors_h, *b_factors_d; b_factors_h = boltzmann_factors(T,J); malloc_float_d(&b_factors_d, 6); cudaMemcpy(b_factors_d, b_factors_h, 6*sizeof(float), cudaMemcpyHostToDevice); int which = 0, samples = 0; //Transient mc steps for (int i = 0; i < trans_steps*2; i++) { // Change which squares to update every iteration which = (which + 1) % 2; // Update half of the spins update_spins<<<grid_size, block_half>>>( n, spins_d, b_factors_d, J, which, rng_d ); } float *E_h, *E_d, *M_h, *M_d; E_h = malloc_float_h(1); M_h = malloc_float_h(1); malloc_float_d(&E_d, 1); malloc_float_d(&M_d, 1); // Double precision used to avoid cumulating error from float summation double E_tot = 0, Mabs_tot = 0, E2_tot = 0, M2_tot = 0, M4_tot = 0; //Main loop where the averages are calculated for (int mc = 0; mc < mc_steps; mc++) { // Updating all spins for (int i = 0; i<2; i++) { // Change which squares to update every iteration which = (which + 1) % 2; update_spins<<<grid_size, block_half>>>( n, spins_d, b_factors_d, J, which, rng_d ); } // Calculating the physical quantities every calc mc steps if (mc % calc == 0) { // Calculating the current energy and magnetization *E_h = 0; *M_h = 0; cudaMemcpy(E_d, E_h, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(M_d, M_h, sizeof(float), 
cudaMemcpyHostToDevice); E_total<<<grid_size, block_whole>>>(spins_d, n, J, E_d); M_total<<<grid_size, block_whole>>>(spins_d, n, M_d); cudaMemcpy(M_h, M_d, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(E_h, E_d, sizeof(float), cudaMemcpyDeviceToHost); // Updating all the physical quantities on the host E_tot += *E_h/3; E2_tot += pow(*E_h/3,2); Mabs_tot += abs(*M_h); M2_tot += pow(*M_h, 2); M4_tot += pow(*M_h, 4); samples++; } } float norm = 1.0/(n*n*n*samples); float *ret = malloc_float_h(9); ret[0] = E_tot*norm; //mean energy ret[1] = E2_tot*norm; //mean energy^2 ret[2] = Mabs_tot*norm; //mean |magnetization| ret[3] = M2_tot*norm; //mean magnetization^2 ret[4] = M4_tot*norm; //mean magnetization^4 ret[5] = (ret[1]-(ret[0]*ret[0]*n*n*n))/T; //heat capacity ret[6] = (ret[3]-(ret[2]*ret[2]*n*n*n))/T; //magnetic susceptibility ret[7] = 1-(ret[4]/(3*ret[3]*ret[3]*n*n*n)); //binder cumulant ret[8] = T; //temperature free(spins_h); free(b_factors_h); free(E_h); free(M_h); cudaFree(spins_d); cudaFree(b_factors_d); cudaFree(E_d); cudaFree(M_d); return ret; }
79ea108ec30fd4b548dccf148ecb9e746fa5398f.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file graphsum_app.cu * * @brief gcn graphsum application */ #include <gunrock/gunrock.h> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph definations #include <gunrock/graphio/graphio.cuh> #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> // single-source shortest path includes #include <gunrock/app/GuNNrock/CrossEntropyLoss/CrossEntropyLoss_enactor.cuh> #include <gunrock/app/GuNNrock/CrossEntropyLoss/CrossEntropyLoss_test.cuh> /** * @brief graphsum layer of GCN * * @param parameters The parameters * @param graph The graph * @param[in] dim dimension of the feature vector * @param in the input to the graphsum layer * @param out output matrix * * @tparam GraphT type of the graph * @tparam ValueT type of the value, double by default * * @return time elapsed to execute */ namespace gunrock { namespace app { namespace CrossEntropyLoss { hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<int>( "num_classes", util::OPTIONAL_PARAMETER | util::SINGLE_VALUE | util::OPTIONAL_ARGUMENT, 10, "number of classes per node", __FILE__, __LINE__ )); GUARD_CU(parameters.Use<int>( "num_nodes", util::OPTIONAL_PARAMETER | util::SINGLE_VALUE | util::OPTIONAL_ARGUMENT, 1000, "number of nodes", __FILE__, __LINE__ )); return retval; } } } } using namespace gunrock; template 
<typename SizeT, typename ValueT, typename GraphT> struct cross_entropy : module { typedef gunrock::app::CrossEntropyLoss::Problem<GraphT> ProblemT; typedef gunrock::app::CrossEntropyLoss::Enactor<ProblemT> EnactorT; typedef util::Array1D<SizeT, ValueT> Array; GraphT dummy; util::Array1D<SizeT, ValueT> logits, grad; util::Array1D<SizeT, int> truth; ProblemT *problem; EnactorT *enactor; int dim; float *fw_time; cross_entropy(util::Parameters &p, Array _logits, Array _grad, util::Array1D<SizeT, int> _truth, int num_nodes, int num_classes, float *_fw, bool training=true) : logits(_logits), grad(_grad), truth(_truth), fw_time(_fw) { problem = new ProblemT(p); enactor = new EnactorT(); problem->Init(dummy, num_nodes, num_classes, logits, grad, truth, training); enactor->Init(*problem); } virtual void forward(bool train) override { timer.Start (); problem->Reset(train); enactor->Reset(); enactor->Enact(); timer.Stop (); *fw_time += timer.ElapsedMillis (); } virtual void backward() override {} virtual double GetLoss() override { double loss; problem->Extract (&loss); return loss; } }; template <typename GraphT, typename ValueT = typename GraphT::ValueT> double CrossEntropyLoss(gunrock::util::Parameters &parameters, GraphT &graph, const int num_nodes, const int num_classes, ValueT *logits, int *ground_truth, ValueT *grad, ValueT &loss) { typedef typename GraphT::VertexT VertexT; typedef gunrock::app::CrossEntropyLoss::Problem<GraphT> ProblemT; typedef gunrock::app::CrossEntropyLoss::Enactor<ProblemT> EnactorT; gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; // if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, num_nodes, num_classes, logits, ground_truth); enactor.Init(problem, target); problem.Reset(); enactor.Reset(); cpu_timer.Start(); enactor.Enact(); 
cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract(grad, &loss); enactor.Release(target); problem.Release(target); return total_time; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
79ea108ec30fd4b548dccf148ecb9e746fa5398f.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file graphsum_app.cu * * @brief gcn graphsum application */ #include <gunrock/gunrock.h> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph definations #include <gunrock/graphio/graphio.cuh> #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> // single-source shortest path includes #include <gunrock/app/GuNNrock/CrossEntropyLoss/CrossEntropyLoss_enactor.cuh> #include <gunrock/app/GuNNrock/CrossEntropyLoss/CrossEntropyLoss_test.cuh> /** * @brief graphsum layer of GCN * * @param parameters The parameters * @param graph The graph * @param[in] dim dimension of the feature vector * @param in the input to the graphsum layer * @param out output matrix * * @tparam GraphT type of the graph * @tparam ValueT type of the value, double by default * * @return time elapsed to execute */ namespace gunrock { namespace app { namespace CrossEntropyLoss { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<int>( "num_classes", util::OPTIONAL_PARAMETER | util::SINGLE_VALUE | util::OPTIONAL_ARGUMENT, 10, "number of classes per node", __FILE__, __LINE__ )); GUARD_CU(parameters.Use<int>( "num_nodes", util::OPTIONAL_PARAMETER | util::SINGLE_VALUE | util::OPTIONAL_ARGUMENT, 1000, "number of nodes", __FILE__, __LINE__ )); return retval; } } } } using namespace gunrock; template <typename SizeT, typename ValueT, typename GraphT> struct 
cross_entropy : module { typedef gunrock::app::CrossEntropyLoss::Problem<GraphT> ProblemT; typedef gunrock::app::CrossEntropyLoss::Enactor<ProblemT> EnactorT; typedef util::Array1D<SizeT, ValueT> Array; GraphT dummy; util::Array1D<SizeT, ValueT> logits, grad; util::Array1D<SizeT, int> truth; ProblemT *problem; EnactorT *enactor; int dim; float *fw_time; cross_entropy(util::Parameters &p, Array _logits, Array _grad, util::Array1D<SizeT, int> _truth, int num_nodes, int num_classes, float *_fw, bool training=true) : logits(_logits), grad(_grad), truth(_truth), fw_time(_fw) { problem = new ProblemT(p); enactor = new EnactorT(); problem->Init(dummy, num_nodes, num_classes, logits, grad, truth, training); enactor->Init(*problem); } virtual void forward(bool train) override { timer.Start (); problem->Reset(train); enactor->Reset(); enactor->Enact(); timer.Stop (); *fw_time += timer.ElapsedMillis (); } virtual void backward() override {} virtual double GetLoss() override { double loss; problem->Extract (&loss); return loss; } }; template <typename GraphT, typename ValueT = typename GraphT::ValueT> double CrossEntropyLoss(gunrock::util::Parameters &parameters, GraphT &graph, const int num_nodes, const int num_classes, ValueT *logits, int *ground_truth, ValueT *grad, ValueT &loss) { typedef typename GraphT::VertexT VertexT; typedef gunrock::app::CrossEntropyLoss::Problem<GraphT> ProblemT; typedef gunrock::app::CrossEntropyLoss::Enactor<ProblemT> EnactorT; gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; // if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, num_nodes, num_classes, logits, ground_truth); enactor.Init(problem, target); problem.Reset(); enactor.Reset(); cpu_timer.Start(); enactor.Enact(); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); 
problem.Extract(grad, &loss); enactor.Release(target); problem.Release(target); return total_time; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
38facc7dea4622c8a56ead8506e0fdfaf6e3137c.hip
// !!! This is a file automatically generated by hipify!!! /* count the number of match tuple in each partition and each thread */ #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "tuple.h" extern "C" { __global__ void count_partitioning( TUPLE *t, int *L, int p_num, int t_num, int rows_num, int table_type ) { int rows_n = rows_num; int p_n = p_num; int t_n = t_num; int PER_TH = LEFT_PER_TH; if(table_type != LEFT) PER_TH = RIGHT_PER_TH; int DEF = blockIdx.x * blockDim.x * PER_TH; int x = blockIdx.x * blockDim.x + threadIdx.x; int Dim = 0; if(gridDim.x-1 == blockIdx.x){ Dim = t_n - blockIdx.x*blockDim.x; }else{ Dim = blockDim.x; } // Matching phase int hash = 0; if(x < t_n){ for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){ hash = t[DEF + threadIdx.x + i*Dim].val % p_n; L[hash*t_n + x]++; } } } __global__ void partitioning( TUPLE *t, TUPLE *pt, int *L, int p_num, int t_num, int rows_num, int table_type ) { int p_n = p_num; int t_n = t_num; int rows_n = rows_num; int PER_TH = LEFT_PER_TH; if(table_type != LEFT) PER_TH = RIGHT_PER_TH; int DEF = blockIdx.x * blockDim.x * PER_TH; int x = blockIdx.x * blockDim.x + threadIdx.x; int Dim = 0; if(gridDim.x-1 == blockIdx.x){ Dim = t_n - blockIdx.x*blockDim.x; }else{ Dim = blockDim.x; } // Matching phase int hash = 0; int temp = 0; if(x < t_n){ for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){ hash = t[DEF + threadIdx.x + i*Dim].val%p_n; temp = L[hash*t_n + x]; pt[temp].key = t[DEF + threadIdx.x + i*Dim].key; pt[temp].val = t[DEF + threadIdx.x + i*Dim].val; L[hash*t_n + x] = temp + 1; } } } }
38facc7dea4622c8a56ead8506e0fdfaf6e3137c.cu
/* count the number of match tuple in each partition and each thread */ #include <stdio.h> #include <stdint.h> #include <cuda.h> #include <sys/time.h> #include "tuple.h" extern "C" { __global__ void count_partitioning( TUPLE *t, int *L, int p_num, int t_num, int rows_num, int table_type ) { int rows_n = rows_num; int p_n = p_num; int t_n = t_num; int PER_TH = LEFT_PER_TH; if(table_type != LEFT) PER_TH = RIGHT_PER_TH; int DEF = blockIdx.x * blockDim.x * PER_TH; int x = blockIdx.x * blockDim.x + threadIdx.x; int Dim = 0; if(gridDim.x-1 == blockIdx.x){ Dim = t_n - blockIdx.x*blockDim.x; }else{ Dim = blockDim.x; } // Matching phase int hash = 0; if(x < t_n){ for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){ hash = t[DEF + threadIdx.x + i*Dim].val % p_n; L[hash*t_n + x]++; } } } __global__ void partitioning( TUPLE *t, TUPLE *pt, int *L, int p_num, int t_num, int rows_num, int table_type ) { int p_n = p_num; int t_n = t_num; int rows_n = rows_num; int PER_TH = LEFT_PER_TH; if(table_type != LEFT) PER_TH = RIGHT_PER_TH; int DEF = blockIdx.x * blockDim.x * PER_TH; int x = blockIdx.x * blockDim.x + threadIdx.x; int Dim = 0; if(gridDim.x-1 == blockIdx.x){ Dim = t_n - blockIdx.x*blockDim.x; }else{ Dim = blockDim.x; } // Matching phase int hash = 0; int temp = 0; if(x < t_n){ for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){ hash = t[DEF + threadIdx.x + i*Dim].val%p_n; temp = L[hash*t_n + x]; pt[temp].key = t[DEF + threadIdx.x + i*Dim].key; pt[temp].val = t[DEF + threadIdx.x + i*Dim].val; L[hash*t_n + x] = temp + 1; } } } }
59fa64cbf1b96110e5fabad484f450eb12ddff1a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS REPLACE_ITERATIONS #include "../include/ContAcq-IntClk.h" // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1; float Value2; float Value3; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access if( 
((i%32)<=3) ){ for(unsigned k=0; k<ITERATIONS;k++) { Value1=I1*I2; Value3=I1*I2; Value1*=Value2; Value1*=Value2; Value2=Value3*Value1; Value1=Value2*Value3; } } __syncthreads(); Value=Value1; C[i]=Value*Value2; } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C 
contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
59fa64cbf1b96110e5fabad484f450eb12ddff1a.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define ITERATIONS REPLACE_ITERATIONS #include "../include/ContAcq-IntClk.h" // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1; float Value2; float Value3; float Value; float I1=A[i]; float I2=B[i]; // Excessive Addition access if( ((i%32)<=3) ){ for(unsigned k=0; k<ITERATIONS;k++) { 
Value1=I1*I2; Value3=I1*I2; Value1*=Value2; Value1*=Value2; Value2=Value3*Value1; Value1=Value2*Value3; } } __syncthreads(); Value=Value1; C[i]=Value*Value2; } int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, 
cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
8e77bf0b223c9dde160a2daff456fed7d352ae57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Lesson7.h" #include <cudaDefs.h> #include <limits> namespace lesson7 { constexpr size_t BLOCK_SIZE = 8; template<typename T> void fill_data(T *array, size_t len) { for (int i = 0; i < len; i++) array[i] = i; array[len / 2] = len + 1; } template<typename T> __global__ void atomic_max1(T *array, size_t len, T *max) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= len) return; int shift = gridDim.x + blockDim.x; while (i < len) { if (*max < array[i]) atomicMax(max, array[i]); i += shift; } } template<typename T> __global__ void atomic_max2(T *array, size_t len, T *max) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= len) return; int shift = gridDim.x + blockDim.x; T localMax = *max; while (i < len) { if (localMax < array[i]) localMax = array[i]; i += shift; } atomicMax(max, localMax); } void run() { constexpr size_t N = 500; int *hData = new int[N]; hipEvent_t start, stop; float time; createTimer(&start, &stop, &time); startTimer(start); fill_data(hData, N); stopTimer(start, stop, time); int *dData = nullptr; checkCudaErrors(hipMalloc((void**)&dData, sizeof(int) * N)); hipMemcpy(dData, hData, sizeof(int) * N, hipMemcpyKind::hipMemcpyHostToDevice); int hMax = std::numeric_limits<int>().min(); int *dMax = nullptr; checkCudaErrors(hipMalloc((void**)&dMax, sizeof(int))); hipMemcpy(dMax, &hMax, sizeof(int), hipMemcpyKind::hipMemcpyHostToDevice); KernelSetting ks_atomic_max; ks_atomic_max.dimBlock = dim3(BLOCK_SIZE); ks_atomic_max.blockSize = BLOCK_SIZE; ks_atomic_max.dimGrid = dim3(getNumberOfParts(N, ks_atomic_max.blockSize)); startTimer(start); atomic_max1 << <ks_atomic_max.dimBlock, ks_atomic_max.blockSize >> > (dData, N, dMax); stopTimer(start, stop, time); startTimer(start); atomic_max2 << <ks_atomic_max.dimBlock, ks_atomic_max.blockSize >> > (dData, N, dMax); stopTimer(start, stop, time); delete[] hData; } }
8e77bf0b223c9dde160a2daff456fed7d352ae57.cu
#include "Lesson7.h" #include <cudaDefs.h> #include <limits> namespace lesson7 { constexpr size_t BLOCK_SIZE = 8; template<typename T> void fill_data(T *array, size_t len) { for (int i = 0; i < len; i++) array[i] = i; array[len / 2] = len + 1; } template<typename T> __global__ void atomic_max1(T *array, size_t len, T *max) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= len) return; int shift = gridDim.x + blockDim.x; while (i < len) { if (*max < array[i]) atomicMax(max, array[i]); i += shift; } } template<typename T> __global__ void atomic_max2(T *array, size_t len, T *max) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= len) return; int shift = gridDim.x + blockDim.x; T localMax = *max; while (i < len) { if (localMax < array[i]) localMax = array[i]; i += shift; } atomicMax(max, localMax); } void run() { constexpr size_t N = 500; int *hData = new int[N]; cudaEvent_t start, stop; float time; createTimer(&start, &stop, &time); startTimer(start); fill_data(hData, N); stopTimer(start, stop, time); int *dData = nullptr; checkCudaErrors(cudaMalloc((void**)&dData, sizeof(int) * N)); cudaMemcpy(dData, hData, sizeof(int) * N, cudaMemcpyKind::cudaMemcpyHostToDevice); int hMax = std::numeric_limits<int>().min(); int *dMax = nullptr; checkCudaErrors(cudaMalloc((void**)&dMax, sizeof(int))); cudaMemcpy(dMax, &hMax, sizeof(int), cudaMemcpyKind::cudaMemcpyHostToDevice); KernelSetting ks_atomic_max; ks_atomic_max.dimBlock = dim3(BLOCK_SIZE); ks_atomic_max.blockSize = BLOCK_SIZE; ks_atomic_max.dimGrid = dim3(getNumberOfParts(N, ks_atomic_max.blockSize)); startTimer(start); atomic_max1 << <ks_atomic_max.dimBlock, ks_atomic_max.blockSize >> > (dData, N, dMax); stopTimer(start, stop, time); startTimer(start); atomic_max2 << <ks_atomic_max.dimBlock, ks_atomic_max.blockSize >> > (dData, N, dMax); stopTimer(start, stop, time); delete[] hData; } }
1b8fefa9a5dbabc83177293c42063ff9be103f7e.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
1b8fefa9a5dbabc83177293c42063ff9be103f7e.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination< float, 1, int32_t, float, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
d21b7f196121646af935d4791513c2c879f110ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void apply_lifter_and_floor_energy( int num_frames, int num_cols, float cepstral_lifter, bool use_energy, float energy_floor, float *log_energy, float *lifter_coeffs, float *features, int32_t ldf) { int thread_id = threadIdx.x; int frame = blockIdx.x; float *feats = features + frame * ldf; // apply lifter coefficients if (cepstral_lifter != 0.0f) { for (int c = thread_id; c < num_cols; c += CU1DBLOCK) { float lift = lifter_coeffs[c]; float f = feats[c]; feats[c] = f * lift; } } // Thread 0 for each frame will apply energy if (use_energy && thread_id == 0) { float energy = log_energy[frame]; float log_energy_floor = log(energy_floor); if (energy_floor > 0.0f && energy < log_energy_floor) { energy = log_energy_floor; } feats[0] = energy; } }
d21b7f196121646af935d4791513c2c879f110ff.cu
#include "includes.h" __global__ void apply_lifter_and_floor_energy( int num_frames, int num_cols, float cepstral_lifter, bool use_energy, float energy_floor, float *log_energy, float *lifter_coeffs, float *features, int32_t ldf) { int thread_id = threadIdx.x; int frame = blockIdx.x; float *feats = features + frame * ldf; // apply lifter coefficients if (cepstral_lifter != 0.0f) { for (int c = thread_id; c < num_cols; c += CU1DBLOCK) { float lift = lifter_coeffs[c]; float f = feats[c]; feats[c] = f * lift; } } // Thread 0 for each frame will apply energy if (use_energy && thread_id == 0) { float energy = log_energy[frame]; float log_energy_floor = log(energy_floor); if (energy_floor > 0.0f && energy < log_energy_floor) { energy = log_energy_floor; } feats[0] = energy; } }
c2fc71a547bfa82b65b521ee3a6c511f04455ac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * als.cu * * Created on: Feb 10, 2015 * Author: Wei Tan (wtan@us.ibm.com) * Alternating Least Square for Matrix Factorization on CUDA 7.0+ * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. 
Also tested in cc 5.2 */ //do not use magma and fp16 by default //#define CUMF_USE_MAGMA //#define CUMF_USE_HALF //#define SURPASS_NAN #include "als.h" #include "device_utilities.h" #include "cg.h" #include "host_utilities.h" #include <fstream> #include <assert.h> #include <hip/hip_fp16.h> #define USE_CG #ifdef CUMF_USE_HALF #define SCAN_BATCH 24 #else #define SCAN_BATCH 24 #endif #ifdef CUMF_USE_MAGMA #include "flops.h" #include "magma.h" #include "magma_lapack.h" #include "testings.h" #endif int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float** devPtrTTHost, float **devPtrYthetaTHost){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******Batch LU factorization of tt.\n"); #endif //pointers needed by batch op float **devPtrTT = 0; int *INFO; for (int k = 0; k < batch_size; k++) { devPtrTTHost[k] = &tt[k * f * f]; } cudacall(hipMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT))); cudacall(hipMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),hipMemcpyHostToDevice)); //cudacall( hipMalloc(&P, f * batch_size * sizeof(int)) ); cudacall( hipMalloc(&INFO, batch_size * sizeof(int) )); cublascall(hipblasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size)); hipDeviceSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n"); #endif float **devPtrYthetaT = 0; for (int k = 0; k < batch_size; k++) { devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f]; } cudacall(hipMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT))); cudacall(hipMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), hipMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1, (const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) ); hipDeviceSynchronize(); hipError_t cudaStat1 = hipGetLastError(); if (cudaStat1 != hipSuccess) { fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( hipMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif cudacall(hipFree(devPtrTT)); //cudacall(hipFree(P)); cudacall(hipFree(INFO)); cudacall(hipFree(devPtrYthetaT)); return 0; } int updateTheta(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******LU factorize xx.\n"); #endif float **devPtrXX = 0; for (int k = 0; k < batch_size; k++) { devPtrXXHost[k] = &xx[k * f * f]; } cudacall(hipMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX))); cudacall(hipMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), hipMemcpyHostToDevice)); int *INFO; //cudacall(hipMalloc(&P, f * batch_size * sizeof(int))); cudacall(hipMalloc(&INFO, batch_size * sizeof(int))); cublascall(hipblasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size)); hipDeviceSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); printf("******* solve xx * thetaT = yTXT with CUDA 7.\n"); #endif float **devPtrYTXT = 0; for (int k = 0; k < batch_size; k++) { devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f]; } cudacall(hipMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT))); cudacall(hipMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),hipMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1, (const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) ); hipDeviceSynchronize(); hipError_t cudaStat1 = hipGetLastError(); if (cudaStat1 != hipSuccess) { fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( hipMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif hipFree(devPtrXX); hipFree(INFO); free(info2); hipFree(devPtrYTXT); return 0; } #ifdef USE_MAGMA int updateThetaMagma(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ //variables for timing float elapsed; struct timeval tv1, tv2; gettimeofday(&tv1, NULL); printf("*******magma Cholesky factorization.\n"); magma_init(); magma_opts opts( MagmaOptsBatched ); char *parray[10]; char **x; x = &parray[0]; opts.parse_opts(1,x); magma_queue_t queue = opts.queue; int min_batch = batch_size; int info = 0; int * dinfo_array = 0; float **dA_array = NULL; float **dB_array = NULL; float **hA_array = (float**) malloc(min_batch * sizeof(hA_array[0])); float **hB_array = (float**) malloc(min_batch * sizeof(hB_array[0])); cudacall (hipMalloc((void**) &dinfo_array, min_batch*sizeof(int))); cudacall(hipMalloc((void** ) &dA_array, min_batch * sizeof(*dA_array))); cudacall(hipMalloc((void** ) &dB_array, min_batch * sizeof(*dB_array))); for (int k = 0; k < batch_size; k++) { hA_array[k] = &xx[k * f * f]; hB_array[k] = &yTXT[batch_offset * f + k * f]; } cudacall(hipMemcpy(dA_array, hA_array, min_batch * sizeof(*dA_array), hipMemcpyHostToDevice)); cudacall(hipMemcpy(dB_array, hB_array, min_batch * sizeof(*dB_array), hipMemcpyHostToDevice)); info = magma_sposv_batched(MagmaLower, f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); magma_int_t *dipiv; magma_int_t **dipiv_array = NULL; TESTING_MALLOC_DEV( dipiv, magma_int_t, f * min_batch ); TESTING_MALLOC_DEV( dipiv_array, magma_int_t*, min_batch ); magma_iset_pointer( dipiv_array, dipiv, 1, 0, 0, f, min_batch, queue ); //info = magma_sgesv_nopiv_batched(f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); //info = magma_sgesv_batched(f, 1, dA_array, f, dipiv_array, dB_array, f, dinfo_array, min_batch, queue); int *cpu_info = (int*) 
malloc(min_batch*sizeof(int)); cudacall(hipMemcpy(cpu_info, dinfo_array, min_batch * sizeof(int),hipMemcpyDeviceToHost)); cudacall( hipMemcpy(&thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); for(int i = 0; i < min_batch; i++){ if(cpu_info[i] != 0 ){ printf("magma_sposv_batched matrix %d returned internal error %d\n",i, (int)cpu_info[i] ); } } if (info != 0) printf("magma_sposv_batched returned argument error %d: %s.\n", (int) info, magma_strerror( info )); hipFree(dA_array); hipFree(dB_array); hipFree( dinfo_array ); hipFree(dipiv_array); hipFree(dipiv); free(cpu_info); free(hA_array); free(hB_array); //free(x); magma_finalize(); gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); return 0; } #endif __global__ void RMSE(const float * csrVal, const int* cooRowIndex, const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz, const int error_size, const int f) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < nnz) { int row = cooRowIndex[i]; int col = csrColIndex[i]; float e = csrVal[i]; //if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\n", row, col, i, e); for (int k = 0; k < f; k++) { #ifdef SURPASS_NAN //a and b could be; there are user/item in testing but not training set float a = __ldg(&thetaT[f * col + k]); float b = __ldg(&XT[f * row + k]); if(isnan(a)||isnan(b)) break; else e -= a * b; //if(isnan(a)) printf("row: %d, col: %d\n", row, col); //if(isnan(b)) printf("b[%d]: %f.\n", i, b); #else e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]); #endif } atomicAdd(&error[i%error_size], e*e); //if(i%1000000==0) printf("error[%d]: %f.\n", i, e); } } //using fp16 as thetaT's format //using fp16 in computate seems causing register pressure since half intrinsics cannot be used. //using fp16 in compute also does not converge. 
not sure if the code is incorrect, or ALS cannot tolerate half-precision __global__ void __launch_bounds__(64, 6) get_hermitian100WithHalf(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const half* __restrict__ thetaT_fp16) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: 
copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require: 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); thetaTemp[index * F/2 + k/2] = __half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1])); //thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51])); //thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } 
//regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } __global__ void __launch_bounds__(64, 6) get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + 
threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH 
+ k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG //if(threadIdx.x==0) // printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); #endif if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } /*a generic kernel to get the hermitian matrices 
* as the left-hand side of the equations, to update X in ALS *examplary F = 100, T = 10 */ __global__ void get_hermitianT10(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp []; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int N = F/T10; // N = 100/10=10; for F = 100 and T = 10 int effective_block_size = N*(N+1)/2; //get the x and y coordinate int tile_x = 0; int tile_y = 0; for ( int i = 0; i < N; i++ ) { int end = ((2*N-i)*(i+1))/2; if(threadIdx.x < 
end){ tile_x = i * T10; tile_y = (N + threadIdx.x - end) * T10; break; } } int index = blockIdx.x*F*F; //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //phase 1 in iteration: gmem --> smem //REQ: blockDim.x >= F/2 if(threadIdx.x < F/2){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ float2 theta; theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]); thetaTemp[k * F/2 + threadIdx.x] = theta; //this simpler statement is slower. //thetaTemp[k * F/2 + threadIdx.x] = __ldg((float2*)&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); } //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float)); } } __syncthreads(); //phase 2 in iteration: smem --> register if(threadIdx.x < effective_block_size){//this redundant "if" seems improving kernel performance for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); //phase 3, after iteration: register --> gmem if(threadIdx.x < effective_block_size){ fill_lower_half_from_registers(); //symmetric if(tile_x != tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < T10; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr, const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr, const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost, const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr, const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda, const int ITERS, const int X_BATCH, 
const int THETA_BATCH, const int DEVICEID) { hipSetDevice(DEVICEID); printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz); //device pointers int * csrRowIndex = 0; int * csrColIndex = 0; float * csrVal = 0; float * thetaT = 0; float * tt = 0; float * XT = 0; float * cscVal =0; int * cscRowIndex = 0; int * cscColIndex = 0; //coo to calculate RMSE int * cooRowIndex =0; float * cooVal_test; int * cooRowIndex_test; int * cooColIndex_test; float final_rmse = 0; printf("*******start allocating memory on GPU...\n"); cudacall(hipMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0]))); cudacall(hipMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0]))); cudacall(hipMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0]))); //dimension: F*N cudacall(hipMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0]))); //dimension: M*F cudacall(hipMalloc((void** ) &XT, f * m * sizeof(XT[0]))); printf("*******start copying memory to GPU...\n"); cudacall(hipMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),hipMemcpyHostToDevice)); cudacall(hipMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyHostToDevice)); //CG needs XT cudacall(hipMemcpy(XT, XTHost, (size_t ) (m * f * sizeof(XT[0])), hipMemcpyHostToDevice)); cudacall(hipDeviceSetCacheConfig(hipFuncCachePreferShared)); //64-bit smem access //http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); //initialize cublas, cusparse hipblasHandle_t handle; cublascall(hipblasCreate(&handle)); hipsparseHandle_t cushandle = 0; cusparsecall(hipsparseCreate(&cushandle)); hipsparseMatDescr_t descr; cusparsecall( hipsparseCreateMatDescr(&descr)); 
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); using namespace std; #ifdef DEBUG //variable used to time double t0 = 0; double t1 = 0; #endif printf("*******start iterations...\n"); for(int iter = 0; iter < ITERS ; iter ++){ #ifdef DEBUG printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter); t0 = seconds(); t1 = seconds(); #endif //copy csr matrix in cudacall(hipMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0]))); cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(hipMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice)); #ifdef DEBUG printf("\tgenerate: Y*theta using cusparse.\n"); #endif float * ytheta = 0; float * ythetaT = 0; cudacall(hipMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0]))); cudacall(hipMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0]))); const float alpha = 1.0f; const float beta = 0.0f; cusparsecall (hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal, csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) ); //hipDeviceSynchronize(); //printf("*******transpose ytheta use cublas.\n"); //ytheta: m*f; need ythetaT = (ytheta).T = f*m cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, m, &alpha, (const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f)); //hipDeviceSynchronize(); //cudaCheckError(); cudacall(hipFree(ytheta)); cudacall(hipFree(csrVal)); #ifdef DEBUG printf("\tgenerate: Y*theta run %f seconds.\n", seconds() - t1); #endif int block_dim = 
f/T10*(f/T10+1)/2; if (block_dim < f/2) block_dim = f/2; for(int batch_id = 0; batch_id< X_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, X_BATCH); #endif int batch_size = 0; if(batch_id != X_BATCH - 1) batch_size = m/X_BATCH; else batch_size = m - batch_id*(m/X_BATCH); int batch_offset = batch_id * (m/X_BATCH); cudacall(hipMalloc((void** ) &tt, f * f * batch_size * sizeof(float))); #ifdef DEBUG t1 = seconds(); printf("\tupdateXByBlock kernel.\n"); #endif if(f == 100){ //do not use fp16 by default #ifdef CUMF_USE_HALF half* thetaT_fp16 = 0; cudacall(hipMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0]))); hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((n*f-1)/1024 + 1), dim3(1024), 0, 0, thetaT, thetaT_fp16, f*n); hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16); cudacall(hipFree(thetaT_fp16)); #else hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); //This commented out is the fused kernel //performance not good due to register pressure and low occupancy //alsUpdateFeature100Host // (batch_offset, csrRowIndex, csrColIndex, lambda, m, f, thetaT, XT, ythetaT, 6); #endif } else hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f); t1 = seconds(); #endif #ifdef USE_CG //cg_iter = als_iter: solve more carefully in later ALS iterations updateXWithCGHost(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, 6); #else //host pointers for cublas batch operations float ** devPtrTTHost 
= 0; cudacall(hipHostMalloc( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) ); float **devPtrYthetaTHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) ); updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz, devPtrTTHost, devPtrYthetaTHost); cudacall(hipHostFree(devPtrTTHost)); cudacall(hipHostFree(devPtrYthetaTHost)); #endif #ifdef DEBUG printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset); printf("\tupdateX solver run seconds: %f \n", seconds() - t1); #endif cudacall(hipFree(tt)); } #ifdef DEBUG printf("ALS update X run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, m, f); #endif cudacall(hipFree(csrRowIndex)); cudacall(hipFree(csrColIndex)); cudacall(hipFree(ythetaT)); ///* #ifdef DEBUG t0 = seconds(); t1 = seconds(); printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter); printf("\tgenerate: Y'*X using cusparse.\n"); #endif float * yTX = 0; float * yTXT = 0; cudacall(hipMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0]))); cudacall(hipMalloc((void** ) &yTX, n * f * sizeof(yTX[0]))); cusparsecall( hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal, cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) ); //hipDeviceSynchronize(); //printf("*******transpose yTX \n"); //yTX: n*f; need yTXT = (yTX).T = f*n cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, n, &alpha, (const float * ) yTX, n, &beta, yTXT, f, yTXT, f)); hipDeviceSynchronize(); cudacall(hipFree(yTX)); #ifdef DEBUG printf("\tgenerate: Y'*X run %f seconds.\n", seconds() - t1); #endif //in batches, when N is huge for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH); #endif int batch_size = 0; if(batch_id != THETA_BATCH - 1) batch_size = 
n/THETA_BATCH; else batch_size = n - batch_id*(n/THETA_BATCH); int batch_offset = batch_id * (n/THETA_BATCH); float * xx = 0; cudacall(hipMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0]))); cudacall( hipMemset(xx, 0, f*f*batch_size*sizeof(float)) ); #ifdef DEBUG t1 = seconds(); printf("\tupdateThetaByBlock kernel.\n"); #endif //get_hermitian_theta<<<batch_size, 64>>>(batch_offset, xx, cscRowIndex, cscColIndex, lambda, n); //updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>> if(f == 100){ #ifdef CUMF_USE_HALF half * XT_fp16 = 0; cudacall(hipMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0]))); hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((n*f-1)/1024 + 1), dim3(1024), 0, 0, XT, XT_fp16, f*m); hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16); cudacall(hipFree(XT_fp16)); #else hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); #endif } else hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH*f*sizeof(float), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f); t1 = seconds(); #endif #ifdef DEBUG printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset); #endif #ifdef USE_CG updateXWithCGHost(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, 6); #else float ** devPtrXXHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) ); float **devPtrYTXTHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) ); updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, 
handle, m, n, f, nnz, devPtrXXHost, devPtrYTXTHost); cudacall(hipHostFree(devPtrXXHost)); cudacall(hipHostFree(devPtrYTXTHost)); #endif #ifdef DEBUG printf("\tupdateTheta solver run seconds: %f \n", seconds() - t1); #endif cudacall(hipFree(xx)); } cudacall(hipFree(yTXT)); #ifdef DEBUG printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, n, f); printf("Calculate RMSE.\n"); #endif float * errors_train = 0; int error_size = 1000; cudacall(hipMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0]))); cudacall( hipMemset(errors_train, 0, error_size*sizeof(float)) ); cudacall(hipMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0]))); cudacall(hipMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( RMSE), dim3((nnz-1)/256 + 1), dim3(256), 0, 0, csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipFree(cooRowIndex)); cudacall(hipFree(csrColIndex)); cudacall(hipFree(csrVal)); float* rmse_train = (float*) malloc (sizeof(float)); cublascall( hipblasSasum(handle, error_size, errors_train, 1, rmse_train) ); hipDeviceSynchronize(); printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz)); cudacall(hipFree(errors_train)); float * errors_test = 0; cudacall(hipMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0]))); cudacall( hipMemset(errors_test, 0, error_size*sizeof(float)) ); cudacall(hipMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0]))); 
cudacall(hipMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), hipMemcpyHostToDevice)); cudacall(hipMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0]))); cudacall(hipMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0]))); cudacall(hipMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( RMSE), dim3((nnz_test-1)/256), dim3(256), 0, 0, cooVal_test, cooRowIndex_test, cooColIndex_test, thetaT, XT, errors_test, nnz_test, error_size, f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipFree(cooRowIndex_test)); cudacall(hipFree(cooColIndex_test)); cudacall(hipFree(cooVal_test)); float* rmse_test = (float*) malloc (sizeof(float)); cublascall( hipblasSasum(handle, error_size, errors_test, 1, rmse_test) ); hipDeviceSynchronize(); final_rmse = sqrt((*rmse_test)/nnz_test); printf("--------- Test RMSE in iter %d: %f\n", iter, final_rmse); cudacall(hipFree(errors_test)); //*/ } //copy feature vectors back to host cudacall(hipMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyDeviceToHost)); cudacall(hipMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), hipMemcpyDeviceToHost)); cudacall(hipFree(thetaT)); cudacall(hipFree(XT)); cudacall(hipFree(cscVal)); cudacall(hipFree(cscColIndex)); cudacall(hipFree(cscRowIndex)); //WARN: do not call hipDeviceReset inside ALS() //because the caller needs to access XT and thetaT which was in the same context //cudacall(hipDeviceReset()); return final_rmse; }
c2fc71a547bfa82b65b521ee3a6c511f04455ac7.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * als.cu * * Created on: Feb 10, 2015 * Author: Wei Tan (wtan@us.ibm.com) * Alternating Least Square for Matrix Factorization on CUDA 7.0+ * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ //do not use magma and fp16 by default //#define CUMF_USE_MAGMA //#define CUMF_USE_HALF //#define SURPASS_NAN #include "als.h" #include "device_utilities.h" #include "cg.h" #include "host_utilities.h" #include <fstream> #include <assert.h> #include <cuda_fp16.h> #define USE_CG #ifdef CUMF_USE_HALF #define SCAN_BATCH 24 #else #define SCAN_BATCH 24 #endif #ifdef CUMF_USE_MAGMA #include "flops.h" #include "magma.h" #include "magma_lapack.h" #include "testings.h" #endif int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float** devPtrTTHost, float **devPtrYthetaTHost){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******Batch LU factorization of tt.\n"); #endif //pointers needed by batch op float **devPtrTT = 0; int *INFO; for (int k = 0; k < batch_size; k++) { devPtrTTHost[k] = &tt[k * f * f]; } 
cudacall(cudaMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT))); cudacall(cudaMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),cudaMemcpyHostToDevice)); //cudacall( cudaMalloc(&P, f * batch_size * sizeof(int)) ); cudacall( cudaMalloc(&INFO, batch_size * sizeof(int) )); cublascall(cublasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size)); cudaThreadSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n"); #endif float **devPtrYthetaT = 0; for (int k = 0; k < batch_size; k++) { devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f]; } cudacall(cudaMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT))); cudacall(cudaMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1, (const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) ); cudaThreadSynchronize(); cudaError_t cudaStat1 = cudaGetLastError(); if (cudaStat1 != cudaSuccess) { fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( cudaMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif cudacall(cudaFree(devPtrTT)); //cudacall(cudaFree(P)); cudacall(cudaFree(INFO)); cudacall(cudaFree(devPtrYthetaT)); return 0; } int updateTheta(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******LU factorize xx.\n"); #endif float **devPtrXX = 0; for (int k = 0; k < batch_size; k++) { devPtrXXHost[k] = &xx[k * f * f]; } cudacall(cudaMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX))); cudacall(cudaMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), cudaMemcpyHostToDevice)); int *INFO; //cudacall(cudaMalloc(&P, f * batch_size * sizeof(int))); cudacall(cudaMalloc(&INFO, batch_size * sizeof(int))); cublascall(cublasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size)); cudaThreadSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); printf("******* solve xx * thetaT = yTXT with CUDA 7.\n"); #endif float **devPtrYTXT = 0; for (int k = 0; k < batch_size; k++) { devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f]; } cudacall(cudaMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT))); cudacall(cudaMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1, (const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) ); cudaThreadSynchronize(); cudaError_t cudaStat1 = cudaGetLastError(); if (cudaStat1 != cudaSuccess) { fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( cudaMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif cudaFree(devPtrXX); cudaFree(INFO); free(info2); cudaFree(devPtrYTXT); return 0; } #ifdef USE_MAGMA int updateThetaMagma(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ //variables for timing float elapsed; struct timeval tv1, tv2; gettimeofday(&tv1, NULL); printf("*******magma Cholesky factorization.\n"); magma_init(); magma_opts opts( MagmaOptsBatched ); char *parray[10]; char **x; x = &parray[0]; opts.parse_opts(1,x); magma_queue_t queue = opts.queue; int min_batch = batch_size; int info = 0; int * dinfo_array = 0; float **dA_array = NULL; float **dB_array = NULL; float **hA_array = (float**) malloc(min_batch * sizeof(hA_array[0])); float **hB_array = (float**) malloc(min_batch * sizeof(hB_array[0])); cudacall (cudaMalloc((void**) &dinfo_array, min_batch*sizeof(int))); cudacall(cudaMalloc((void** ) &dA_array, min_batch * sizeof(*dA_array))); cudacall(cudaMalloc((void** ) &dB_array, min_batch * sizeof(*dB_array))); for (int k = 0; k < batch_size; k++) { hA_array[k] = &xx[k * f * f]; hB_array[k] = &yTXT[batch_offset * f + k * f]; } cudacall(cudaMemcpy(dA_array, hA_array, min_batch * sizeof(*dA_array), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(dB_array, hB_array, min_batch * sizeof(*dB_array), cudaMemcpyHostToDevice)); info = magma_sposv_batched(MagmaLower, f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); magma_int_t *dipiv; magma_int_t **dipiv_array = NULL; TESTING_MALLOC_DEV( dipiv, magma_int_t, f * min_batch ); TESTING_MALLOC_DEV( dipiv_array, magma_int_t*, min_batch ); magma_iset_pointer( dipiv_array, dipiv, 1, 0, 0, f, min_batch, queue ); //info = magma_sgesv_nopiv_batched(f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); //info = magma_sgesv_batched(f, 1, dA_array, f, dipiv_array, dB_array, f, dinfo_array, min_batch, queue); int *cpu_info = 
(int*) malloc(min_batch*sizeof(int)); cudacall(cudaMemcpy(cpu_info, dinfo_array, min_batch * sizeof(int),cudaMemcpyDeviceToHost)); cudacall( cudaMemcpy(&thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); for(int i = 0; i < min_batch; i++){ if(cpu_info[i] != 0 ){ printf("magma_sposv_batched matrix %d returned internal error %d\n",i, (int)cpu_info[i] ); } } if (info != 0) printf("magma_sposv_batched returned argument error %d: %s.\n", (int) info, magma_strerror( info )); cudaFree(dA_array); cudaFree(dB_array); cudaFree( dinfo_array ); cudaFree(dipiv_array); cudaFree(dipiv); free(cpu_info); free(hA_array); free(hB_array); //free(x); magma_finalize(); gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); return 0; } #endif __global__ void RMSE(const float * csrVal, const int* cooRowIndex, const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz, const int error_size, const int f) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < nnz) { int row = cooRowIndex[i]; int col = csrColIndex[i]; float e = csrVal[i]; //if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\n", row, col, i, e); for (int k = 0; k < f; k++) { #ifdef SURPASS_NAN //a and b could be; there are user/item in testing but not training set float a = __ldg(&thetaT[f * col + k]); float b = __ldg(&XT[f * row + k]); if(isnan(a)||isnan(b)) break; else e -= a * b; //if(isnan(a)) printf("row: %d, col: %d\n", row, col); //if(isnan(b)) printf("b[%d]: %f.\n", i, b); #else e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]); #endif } atomicAdd(&error[i%error_size], e*e); //if(i%1000000==0) printf("error[%d]: %f.\n", i, e); } } //using fp16 as thetaT's format //using fp16 in computate seems causing register pressure since half intrinsics cannot be used. 
// NOTE(review): per the original author, the fp16 path did not converge
// ("not sure if the code is incorrect, or ALS cannot tolerate half-precision")
// and half intrinsics could not be used to relieve register pressure.
// It stays behind CUMF_USE_HALF as an experimental option.
//
// get_hermitian100WithHalf: builds, for every row in the batch, the F x F
// Hermitian left-hand side of the ALS normal equations
//     tt[row] = sum_{j rated by row} theta_j * theta_j^T + nnz_row * lambda * I
// reading thetaT in fp16 to halve global-memory traffic.
// Launch contract: one 64-thread block per row; dynamic shared memory of
// SCAN_BATCH * F/2 * sizeof(float2) bytes; requires F == 100 and
// SCAN_BATCH <= 32 (the 55 active threads each own one 10x10 output tile).
__global__ void __launch_bounds__(64, 6)
get_hermitian100WithHalf(const int batch_offset, float* tt,
		const int* csrRowIndex, const int* csrColIndex, const float lambda,
		const int m, const int F, const half* __restrict__ thetaT_fp16)
{
	extern __shared__ float2 thetaTemp[];
	int row = blockIdx.x + batch_offset;
	if (row < m) {
		//this block handles the theta columns rated by this row: [start, end)
		int start = csrRowIndex[row];
		int end = csrRowIndex[row + 1];
		//slide through [start, end) in windows of SCAN_BATCH columns
		int iterations = (end - start - 1)/SCAN_BATCH + 1;
		//100 register accumulators; names are fixed because the
		//accumulate_in_registers()/fill_*_from_registers() macros reference
		//temp0..temp99 directly.
		float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
		float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
		float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
		float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
		float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
		float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
		float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
		float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
		float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
		float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
		//map threadIdx.x (0..54) to a 10x10 tile (tile_x, tile_y) in the
		//lower triangle of the 100x100 output
		int tile_x = 0;
		int tile_y = 0;
		int tile = F/10;
		for ( int i = 0; i < 10; i++){
			int tile_end = ((20-i)*(i+1))/2; //renamed from "end": was shadowing the CSR row bound
			if(threadIdx.x < tile_end){
				tile_x = i * tile;
				tile_y = (10 + threadIdx.x - tile_end) * tile;
				break;
			}
		}
		//iteration: copy gmem-->smem; aggregate smem-->register
		for (int iter = 0; iter < iterations; iter ++){
			//copy: two warps to avoid divergence; requires 32 >= SCAN_BATCH
			if(threadIdx.x < 2*32 ){
				int index = threadIdx.x - (threadIdx.x/32)*32; //lane id, 0 to 31
				if(index < SCAN_BATCH){
					if(iter*SCAN_BATCH + index < end - start){
						//warp 0 copies features 0..49, warp 1 copies 50..99,
						//two values at a time via half2 vector loads
						if(threadIdx.x < 32){
							for (int k = 0; k < 50; k += 2){
								half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
								thetaTemp[index * F/2 + k/2] = __half22float2(theta_half2);
							}
						}
						else {
							for (int k = 0; k < 50; k += 2){
								half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
								thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2);
							}
						}
					}
					//last window may be short: zero the missing columns
					else
						memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
				}
			}
			__syncthreads();
			//aggregate: 55 threads, one 10x10 tile each
			if(threadIdx.x < 55 ){
				for(int k = 0; k < SCAN_BATCH; k++){
					accumulate_in_registers();
				}
			}
			//FIX(review): barrier added — without it, threads that skip the
			//aggregation phase could start overwriting thetaTemp for the next
			//window while tile threads are still reading the current one.
			__syncthreads();
		}
		__syncthreads();
		if(threadIdx.x < 55 ){
			//write registers back to gmem: lower half, mirror to upper half,
			//then add the regularization term on the diagonal
			int index = blockIdx.x*F*F;
			fill_lower_half_from_registers();
			//symmetric
			if(tile_x!=tile_y){
				fill_upper_half_from_registers();
			}
			//regularization
			if(tile_x == tile_y){
				for(int k = 0; k < tile; k++)
					tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
			}
		}
	}
}

// get_hermitian100: fp32 version of the kernel above; same output, launch
// contract and tiling scheme (one 64-thread block per row, F == 100,
// SCAN_BATCH <= 32, dynamic smem = SCAN_BATCH * F/2 * sizeof(float2)).
// Dead commented-out staging variants from the original were removed.
__global__ void __launch_bounds__(64, 6)
get_hermitian100(const int batch_offset, float* tt,
		const int* csrRowIndex, const int* csrColIndex, const float lambda,
		const int m, const int F, const float* __restrict__ thetaT)
{
	extern __shared__ float2 thetaTemp[];
	int row = blockIdx.x + batch_offset;
	if (row < m) {
		//this block handles the theta columns rated by this row: [start, end)
		int start = csrRowIndex[row];
		int end = csrRowIndex[row + 1];
		//slide through [start, end) in windows of SCAN_BATCH columns
		int iterations = (end - start - 1)/SCAN_BATCH + 1;
		//100 register accumulators (names fixed — used by the macros below)
		float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
		float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
		float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
		float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
		float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
		float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
		float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
		float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
		float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
		float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
		//map threadIdx.x (0..54) to a 10x10 tile in the lower triangle
		int tile_x = 0;
		int tile_y = 0;
		int tile = F/10;
		for ( int i = 0; i < 10; i++){
			int tile_end = ((20-i)*(i+1))/2; //renamed from "end": was shadowing the CSR row bound
			if(threadIdx.x < tile_end){
				tile_x = i * tile;
				tile_y = (10 + threadIdx.x - tile_end) * tile;
				break;
			}
		}
		//iteration: copy gmem-->smem; aggregate smem-->register
		for (int iter = 0; iter < iterations; iter ++){
			//copy: two warps to avoid divergence; requires 32 >= SCAN_BATCH
			if(threadIdx.x < 2*32 ){
				int index = threadIdx.x - (threadIdx.x/32)*32; //lane id, 0 to 31
				if(index < SCAN_BATCH){
					if(iter*SCAN_BATCH + index < end - start){
						//warp 0 copies features 0..49, warp 1 copies 50..99
						if(threadIdx.x < 32){
							for (int k = 0; k < 50; k += 2){
								float2 theta;
								theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
								theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
								thetaTemp[index * F/2 + k/2] = theta;
							}
						}
						else {
							for (int k = 0; k < 50; k += 2){
								float2 theta;
								theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
								theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
								thetaTemp[index * F/2 + k/2 + 25] = theta;
							}
						}
					}
					//last window may be short: zero the missing columns
					else
						memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
				}
			}
			__syncthreads();
			//aggregate: 55 threads, one 10x10 tile each
			if(threadIdx.x < 55 ){
				for(int k = 0; k < SCAN_BATCH; k++){
					accumulate_in_registers();
				}
			}
			//FIX(review): barrier added — protects thetaTemp readers from the
			//next iteration's writers (see get_hermitian100WithHalf).
			__syncthreads();
		}
		__syncthreads();
		if(threadIdx.x < 55 ){
			//copy output to gmem
			int index = blockIdx.x*F*F;
			fill_lower_half_from_registers();
			//symmetric
			if(tile_x!=tile_y){
				fill_upper_half_from_registers();
			}
			//regularization
			if(tile_x == tile_y){
				for(int k = 0; k < tile; k++)
					tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
			}
		}
	}
}

/* get_hermitianT10: generic-rank version of the hermitian kernel used when
 * F != 100. T10 is the tile edge (e.g. F = 100, T10 = 10 gives N = 10 tiles
 * per side). Launch contract: one block per row; blockDim.x must be at least
 * max(F/2, N*(N+1)/2); dynamic smem = SCAN_BATCH * F/2 * sizeof(float2);
 * F must be divisible by T10 and even.
 */
__global__ void
get_hermitianT10(const int batch_offset, float* tt,
		const int* csrRowIndex, const int* csrColIndex, const float lambda,
		const int m, const int F, const float* __restrict__ thetaT)
{
	extern __shared__ float2 thetaTemp [];
	int row = blockIdx.x + batch_offset;
	if (row < m) {
		//this block handles the theta columns rated by this row: [start, end)
		int start = csrRowIndex[row];
		int end = csrRowIndex[row + 1];
		//slide through [start, end) in windows of SCAN_BATCH columns
		int iterations = (end - start - 1)/SCAN_BATCH + 1;
		//register accumulators for one T10 x T10 tile (names fixed — used by
		//the accumulate/fill macros)
		float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
		float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
		float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
		float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
		float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
		float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
		float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
		float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
		float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
		float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
		int N = F/T10; // N = 100/10=10; for F = 100 and T = 10
		int effective_block_size = N*(N+1)/2;
		//map threadIdx.x to a T10 x T10 tile in the lower triangle
		int tile_x = 0;
		int tile_y = 0;
		for ( int i = 0; i < N; i++ ) {
			int tile_end = ((2*N-i)*(i+1))/2; //renamed from "end": was shadowing the CSR row bound
			if(threadIdx.x < tile_end){
				tile_x = i * T10;
				tile_y = (N + threadIdx.x - tile_end) * T10;
				break;
			}
		}
		int index = blockIdx.x*F*F;
		//iteration: copy gmem-->smem; aggregate smem-->register
		for (int iter = 0; iter < iterations; iter ++){
			//phase 1 in iteration: gmem --> smem
			//REQ: blockDim.x >= F/2
			if(threadIdx.x < F/2){
				for(int k = 0; k< SCAN_BATCH; k++){
					if(iter*SCAN_BATCH + k < end - start){
						float2 theta;
						theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]);
						theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]);
						thetaTemp[k * F/2 + threadIdx.x] = theta;
					}
					//not enough theta to copy, set zero
					else
						memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float));
				}
			}
			__syncthreads();
			//phase 2 in iteration: smem --> register
			if(threadIdx.x < effective_block_size){
				for(int k = 0; k < SCAN_BATCH; k++){
					accumulate_in_registers();
				}
			}
			//FIX(review): barrier added — protects thetaTemp readers from the
			//next iteration's writers.
			__syncthreads();
		}
		__syncthreads();
		//phase 3, after iteration: register --> gmem
		if(threadIdx.x < effective_block_size){
			fill_lower_half_from_registers();
			//symmetric
			if(tile_x != tile_y){
				fill_upper_half_from_registers();
			}
			//regularization
			if(tile_x == tile_y){
				for(int k = 0; k < T10; k++)
					tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
			}
		}
	}
}

/* doALS: run ITERS iterations of alternating least squares on one GPU.
 *
 * Inputs are host copies of the rating matrix (m x n, nnz non-zeros) in CSR,
 * CSC and COO layouts, plus a held-out test set in COO layout (nnz_test).
 * thetaTHost (f x n) and XTHost (f x m) carry the initial feature matrices
 * in and the trained factors out. X_BATCH / THETA_BATCH split the per-row
 * Hermitian buffers into batches so large models fit in device memory; the
 * per-iteration cudaMalloc/cudaFree of the CSR arrays is deliberate for the
 * same reason (cap the peak device-memory footprint).
 *
 * Returns the test RMSE after the final iteration.
 *
 * Fixes vs. the original:
 *  - test-set RMSE launch used (nnz_test-1)/256 blocks (missing "+ 1"), so
 *    the tail of the test set was never scored;
 *  - theta-side fp16 conversion sized its grid by n*f while converting f*m
 *    elements (under-launched when m > n);
 *  - rmse accumulators were malloc'ed every iteration and leaked;
 *  - cuBLAS/cuSPARSE handles were never destroyed.
 */
float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr,
		const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr,
		const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost,
		const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr,
		const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda,
		const int ITERS, const int X_BATCH, const int THETA_BATCH, const int DEVICEID)
{
	cudaSetDevice(DEVICEID);
	printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz);
	//device pointers
	int * csrRowIndex = 0;
	int * csrColIndex = 0;
	float * csrVal = 0;
	float * thetaT = 0;
	float * tt = 0;
	float * XT = 0;
	float * cscVal = 0;
	int * cscRowIndex = 0;
	int * cscColIndex = 0;
	//coo to calculate RMSE
	int * cooRowIndex = 0;
	float * cooVal_test;
	int * cooRowIndex_test;
	int * cooColIndex_test;
	float final_rmse = 0;
	printf("*******start allocating memory on GPU...\n");
	cudacall(cudaMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0])));
	cudacall(cudaMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0])));
	cudacall(cudaMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0])));
	//dimension: F*N
	cudacall(cudaMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0])));
	//dimension: M*F
	cudacall(cudaMalloc((void** ) &XT, f * m * sizeof(XT[0])));
	printf("*******start copying memory to GPU...\n");
	cudacall(cudaMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), cudaMemcpyHostToDevice));
	cudacall(cudaMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), cudaMemcpyHostToDevice));
	cudacall(cudaMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),cudaMemcpyHostToDevice));
	cudacall(cudaMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyHostToDevice));
	//CG needs XT
	cudacall(cudaMemcpy(XT, XTHost, (size_t ) (m * f * sizeof(XT[0])), cudaMemcpyHostToDevice));
	cudacall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
	//64-bit smem access
	//http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus
	cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
	//initialize cublas, cusparse
	cublasHandle_t handle;
	cublascall(cublasCreate(&handle));
	cusparseHandle_t cushandle = 0;
	cusparsecall(cusparseCreate(&cushandle));
	cusparseMatDescr_t descr;
	cusparsecall( cusparseCreateMatDescr(&descr));
	cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
	cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
	using namespace std;
#ifdef DEBUG
	//variable used to time
	double t0 = 0;
	double t1 = 0;
#endif
	printf("*******start iterations...\n");
	for(int iter = 0; iter < ITERS ; iter ++){
#ifdef DEBUG
		printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter);
		t0 = seconds();
		t1 = seconds();
#endif
		//copy csr matrix in (allocated per iteration on purpose: the CSR
		//copies are freed as soon as possible to bound peak device memory)
		cudacall(cudaMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0])));
		cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
		cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
		cudacall(cudaMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice));
#ifdef DEBUG
		printf("\tgenerate: Y*theta using cusparse.\n");
#endif
		float * ytheta = 0;
		float * ythetaT = 0;
		cudacall(cudaMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0])));
		cudacall(cudaMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0])));
		const float alpha = 1.0f;
		const float beta = 0.0f;
		//right-hand sides for the X solves: Y * theta (m x f)
		cusparsecall (cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
				CUSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal,
				csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) );
		//ytheta: m*f; need ythetaT = (ytheta).T = f*m
		cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, m, &alpha,
				(const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f));
		cudacall(cudaFree(ytheta));
		cudacall(cudaFree(csrVal));
#ifdef DEBUG
		printf("\tgenerate: Y*theta run %f seconds.\n", seconds() - t1);
#endif
		//block size for get_hermitianT10: one thread per lower-triangle tile,
		//but at least F/2 threads for the smem staging phase
		int block_dim = f/T10*(f/T10+1)/2;
		if (block_dim < f/2) block_dim = f/2;
		for(int batch_id = 0; batch_id < X_BATCH; batch_id ++){
#ifdef DEBUG
			printf("*******batch %d / %d.*******\n", batch_id, X_BATCH);
#endif
			int batch_size = 0;
			if(batch_id != X_BATCH - 1)
				batch_size = m/X_BATCH;
			else
				batch_size = m - batch_id*(m/X_BATCH);
			int batch_offset = batch_id * (m/X_BATCH);
			cudacall(cudaMalloc((void** ) &tt, f * f * batch_size * sizeof(float)));
#ifdef DEBUG
			t1 = seconds();
			printf("\tupdateXByBlock kernel.\n");
#endif
			if(f == 100){
				//do not use fp16 by default
#ifdef CUMF_USE_HALF
				half* thetaT_fp16 = 0;
				cudacall(cudaMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0])));
				fp32Array2fp16Array<<<(n*f-1)/1024 + 1, 1024>>>(thetaT, thetaT_fp16, f*n);
				get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
					(batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16);
				cudacall(cudaFree(thetaT_fp16));
#else
				get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
					(batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT);
#endif
			}
			else
				get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH * f/2*sizeof(float2)>>>
					(batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT);
			cudaDeviceSynchronize();
			cudaCheckError();
#ifdef DEBUG
			printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f);
			t1 = seconds();
#endif
#ifdef USE_CG
			//cg_iter = als_iter: solve more carefully in later ALS iterations
			updateXWithCGHost(tt, &XT[batch_offset*f], &ythetaT[batch_offset*f], batch_size, f, 6);
#else
			//host pointers for cublas batch operations
			float ** devPtrTTHost = 0;
			cudacall(cudaMallocHost( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) );
			float **devPtrYthetaTHost = 0;
			cudacall(cudaMallocHost( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) );
			updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz,
					devPtrTTHost, devPtrYthetaTHost);
			cudacall(cudaFreeHost(devPtrTTHost));
			cudacall(cudaFreeHost(devPtrYthetaTHost));
#endif
#ifdef DEBUG
			printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset);
			printf("\tupdateX solver run seconds: %f \n", seconds() - t1);
#endif
			cudacall(cudaFree(tt));
		}
#ifdef DEBUG
		printf("ALS update X run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, m, f);
#endif
		cudacall(cudaFree(csrRowIndex));
		cudacall(cudaFree(csrColIndex));
		cudacall(cudaFree(ythetaT));
#ifdef DEBUG
		t0 = seconds();
		t1 = seconds();
		printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter);
		printf("\tgenerate: Y'*X using cusparse.\n");
#endif
		float * yTX = 0;
		float * yTXT = 0;
		cudacall(cudaMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0])));
		cudacall(cudaMalloc((void** ) &yTX, n * f * sizeof(yTX[0])));
		//right-hand sides for the theta solves: Y' * X (n x f)
		cusparsecall( cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
				CUSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal,
				cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) );
		//yTX: n*f; need yTXT = (yTX).T = f*n
		cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, n, &alpha,
				(const float * ) yTX, n, &beta, yTXT, f, yTXT, f));
		cudaDeviceSynchronize();
		cudacall(cudaFree(yTX));
#ifdef DEBUG
		printf("\tgenerate: Y'*X run %f seconds.\n", seconds() - t1);
#endif
		//in batches, when N is huge
		for(int batch_id = 0; batch_id < THETA_BATCH; batch_id ++){
#ifdef DEBUG
			printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH);
#endif
			int batch_size = 0;
			if(batch_id != THETA_BATCH - 1)
				batch_size = n/THETA_BATCH;
			else
				batch_size = n - batch_id*(n/THETA_BATCH);
			int batch_offset = batch_id * (n/THETA_BATCH);
			float * xx = 0;
			cudacall(cudaMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0])));
			cudacall( cudaMemset(xx, 0, f*f*batch_size*sizeof(float)) );
#ifdef DEBUG
			t1 = seconds();
			printf("\tupdateThetaByBlock kernel.\n");
#endif
			//the hermitian kernels are reused with the CSC arrays playing the
			//role of the CSR arrays (transposed problem)
			if(f == 100){
#ifdef CUMF_USE_HALF
				half * XT_fp16 = 0;
				cudacall(cudaMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0])));
				//FIX(review): grid was sized by n*f while converting f*m
				//elements — under-launched whenever m > n
				fp32Array2fp16Array<<<(m*f-1)/1024 + 1, 1024>>>(XT, XT_fp16, f*m);
				get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
					(batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16);
				cudacall(cudaFree(XT_fp16));
#else
				get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
					(batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT);
#endif
			}
			else
				get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH*f*sizeof(float)>>>
					(batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT);
			cudaDeviceSynchronize();
			cudaCheckError();
#ifdef DEBUG
			printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t1, batch_size, f);
			t1 = seconds();
#endif
#ifdef DEBUG
			printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset);
#endif
#ifdef USE_CG
			updateXWithCGHost(xx, &thetaT[batch_offset*f], &yTXT[batch_offset*f], batch_size, f, 6);
#else
			float ** devPtrXXHost = 0;
			cudacall(cudaMallocHost( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) );
			float **devPtrYTXTHost = 0;
			cudacall(cudaMallocHost( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) );
			updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, handle, m, n, f, nnz,
					devPtrXXHost, devPtrYTXTHost);
			cudacall(cudaFreeHost(devPtrXXHost));
			cudacall(cudaFreeHost(devPtrYTXTHost));
#endif
#ifdef DEBUG
			printf("\tupdateTheta solver run seconds: %f \n", seconds() - t1);
#endif
			cudacall(cudaFree(xx));
		}
		cudacall(cudaFree(yTXT));
#ifdef DEBUG
		printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n", seconds() - t0, n, f);
		printf("Calculate RMSE.\n");
#endif
		//training RMSE: squared errors accumulated into error_size buckets,
		//then summed with cublasSasum
		float * errors_train = 0;
		int error_size = 1000;
		cudacall(cudaMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0])));
		cudacall( cudaMemset(errors_train, 0, error_size*sizeof(float)) );
		cudacall(cudaMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0])));
		cudacall(cudaMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0])));
		cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0])));
		cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice));
		RMSE<<<(nnz-1)/256 + 1, 256>>>
			(csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f);
		cudaDeviceSynchronize();
		cudaCheckError();
		cudacall(cudaFree(cooRowIndex));
		cudacall(cudaFree(csrColIndex));
		cudacall(cudaFree(csrVal));
		//FIX(review): accumulate into a stack float instead of a malloc that
		//was leaked every iteration
		float rmse_train = 0;
		cublascall( cublasSasum(handle, error_size, errors_train, 1, &rmse_train) );
		cudaDeviceSynchronize();
		printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt(rmse_train/nnz));
		cudacall(cudaFree(errors_train));
		//test RMSE
		float * errors_test = 0;
		cudacall(cudaMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0])));
		cudacall( cudaMemset(errors_test, 0, error_size*sizeof(float)) );
		cudacall(cudaMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0])));
		cudacall(cudaMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0])));
		cudacall(cudaMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0])));
		cudacall(cudaMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), cudaMemcpyHostToDevice));
		cudacall(cudaMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),cudaMemcpyHostToDevice));
		//FIX(review): grid size was (nnz_test-1)/256 (missing "+ 1"), so the
		//last partial block of test entries was never scored
		RMSE<<<(nnz_test-1)/256 + 1, 256>>>(cooVal_test, cooRowIndex_test, cooColIndex_test,
				thetaT, XT, errors_test, nnz_test, error_size, f);
		cudaDeviceSynchronize();
		cudaCheckError();
		cudacall(cudaFree(cooRowIndex_test));
		cudacall(cudaFree(cooColIndex_test));
		cudacall(cudaFree(cooVal_test));
		float rmse_test = 0;
		cublascall( cublasSasum(handle, error_size, errors_test, 1, &rmse_test) );
		cudaDeviceSynchronize();
		final_rmse = sqrt(rmse_test/nnz_test);
		printf("--------- Test RMSE in iter %d: %f\n", iter, final_rmse);
		cudacall(cudaFree(errors_test));
	}
	//copy feature vectors back to host
	cudacall(cudaMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyDeviceToHost));
	cudacall(cudaMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), cudaMemcpyDeviceToHost));
	cudacall(cudaFree(thetaT));
	cudacall(cudaFree(XT));
	cudacall(cudaFree(cscVal));
	cudacall(cudaFree(cscColIndex));
	cudacall(cudaFree(cscRowIndex));
	//FIX(review): release library handles (were leaked)
	cublascall(cublasDestroy(handle));
	cusparsecall(cusparseDestroyMatDescr(descr));
	cusparsecall(cusparseDestroy(cushandle));
	//WARN: do not call cudaDeviceReset inside ALS()
	//because the caller needs to access XT and thetaT which was in the same context
	//cudacall(cudaDeviceReset());
	return final_rmse;
}
34e578ce9b19ba3244fa55d3bde879d2c51b095c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Backward pass of leaky ReLU with negative slope 0.01:
//   dstDiff[i] = srcDiff[i] * 1     if srcData[i] >  0
//   dstDiff[i] = srcDiff[i] * 0.01  if srcData[i] <= 0
// Grid-stride loop: correct for any grid/block configuration and any
// data_size (including data_size == 0).
__global__ void LreluBackward(float* srcDiff, float* dstDiff, float* srcData, int data_size)
{
    int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
    int num_threads = blockDim.x * gridDim.x;
    for (int index = thread_index; index < data_size; index += num_threads)
    {
        float s = srcData[index];
        // FIX: literals are 0.0f/0.01f — the original 0.01 (double) silently
        // promoted the whole expression to double arithmetic per element.
        // Branchless form kept: (s > 0) contributes 1, (s <= 0) contributes 0.01.
        dstDiff[index] = srcDiff[index] * ((s > 0.0f) + (s <= 0.0f) * 0.01f);
    }
}
34e578ce9b19ba3244fa55d3bde879d2c51b095c.cu
#include "includes.h"

// Backward pass of leaky ReLU with negative slope 0.01:
//   dstDiff[i] = srcDiff[i] * 1     if srcData[i] >  0
//   dstDiff[i] = srcDiff[i] * 0.01  if srcData[i] <= 0
// Grid-stride loop: correct for any grid/block configuration and any
// data_size (including data_size == 0).
__global__ void LreluBackward(float* srcDiff, float* dstDiff, float* srcData, int data_size)
{
    int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
    int num_threads = blockDim.x * gridDim.x;
    for (int index = thread_index; index < data_size; index += num_threads)
    {
        float s = srcData[index];
        // FIX: literals are 0.0f/0.01f — the original 0.01 (double) silently
        // promoted the whole expression to double arithmetic per element.
        // Branchless form kept: (s > 0) contributes 1, (s <= 0) contributes 0.01.
        dstDiff[index] = srcDiff[index] * ((s > 0.0f) + (s <= 0.0f) * 0.01f);
    }
}
5f1d451ff989d0d4eee10d790275ade5dcc69a17.hip
// !!! This is a file automatically generated by hipify!!! //jacobi7.cu #include <hip/hip_runtime.h> #include <stdio.h> #include <jacobi7_cuda_shared_double.h> #include <jacobi7_double.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } void initial_data(double *h_A, double *h_B, const int xyz){ // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_A[i] = 1 + (double)rand() / (double)RAND_MAX; h_B[i] = h_A[i]; } } int main(int argc, char* *argv){ if(argc != 7) { printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int nx = atoi(argv[1]); const int ny = atoi(argv[2]); const int nz = atoi(argv[3]); const int tx = atoi(argv[4]); const int ty = atoi(argv[5]); const int timesteps = atoi(argv[6]); const int xyz = nx * ny * nz; const int xyz_bytes = xyz * sizeof(double); double *h_A, *h_A1; double *h_B, *h_B1; double *d_A; double *d_B; int devId = 0; hipDeviceProp_t prop; checkCuda( hipGetDeviceProperties(&prop, devId)); printf("Device : %s\n", prop.name); checkCuda( hipSetDevice(devId)); // Allocate host buffers checkCuda(hipHostMalloc((void**)&h_A, xyz_bytes)); // host pinned checkCuda(hipHostMalloc((void**)&h_B, xyz_bytes)); // for comparison btw CPU and GPU version checkCuda(hipHostMalloc((void**)&h_A1, xyz_bytes)); checkCuda(hipHostMalloc((void**)&h_B1, xyz_bytes)); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_A[i] = 1 + (double)rand() / (double)RAND_MAX; h_A1[i] = h_B1[i] = h_B[i] = h_A[i]; } // A simple comparison of the result int testIndex = 3 + 3*nx+ 3*nx*ny; printf("Iniatialized 
data[%d]=%f\n", testIndex , h_A[testIndex]); printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]); printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]); const double fac = 6.0/(h_A[0] * h_A[0]); double *tmp; dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty); dim3 block(tx, ty); printf("grid:(%d, %d)\n", grid.x, grid.y); printf("block:(%d, %d)\n", tx, ty); float ms, ms1; // elapsed time in milliseconds printf("Start computing...\n"); /* set the ratio of cache/shared memory hipFuncCachePreferNone: Default function cache configuration, no preference hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferL1));*/ //const int sharedMemSize = (block.x + 2) * (block.y + 2) * sizeof(double); // create events and streams hipEvent_t startEvent, stopEvent, startEvent1, stopEvent1; checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); checkCuda( hipEventCreate(&startEvent1)); checkCuda( hipEventCreate(&stopEvent1)); // timing start include data transfer and memory allocation checkCuda( hipEventRecord(startEvent,0) ); // Allocate device buffers checkCuda(hipMalloc((void**)&d_A, xyz_bytes)); // device checkCuda(hipMalloc((void**)&d_B, xyz_bytes)); double* input = d_A; double* output = d_B; // copy data to device checkCuda( hipMemcpy(d_A, h_A, xyz_bytes, hipMemcpyHostToDevice)); checkCuda( hipMemcpy(d_B, d_A, xyz_bytes, hipMemcpyDeviceToDevice)); // timing start pure gpu computing checkCuda( hipEventRecord(startEvent1, 0)); // Run the GPU kernel for(int t = 0; t < timesteps; t += 1) { hipLaunchKernelGGL(( jacobi3d_7p_shmem_adam_reg), dim3(grid), dim3(block), 0, 0, input, output, nx, ny, nz, fac); // swap input and output tmp = input; input = output; output = tmp; } // timing end pure gpu computing checkCuda( hipEventRecord(stopEvent1, 0)); checkCuda( hipEventSynchronize(stopEvent1)); checkCuda( hipEventElapsedTime(&ms1, 
startEvent1, stopEvent1)); printf("Time of register version (pure GPU) (ms): %f\n", ms1); double gflop = (xyz * 1e-9) * 7.0 * timesteps; double gflop_per_sec = gflop * 1e3 / ms1; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms1; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); if(timesteps%2==0) checkCuda( hipMemcpy(h_A, output, xyz_bytes, hipMemcpyDeviceToHost)); else checkCuda( hipMemcpy(h_A, input, xyz_bytes, hipMemcpyDeviceToHost)); checkCuda( hipEventRecord(stopEvent, 0)); checkCuda( hipEventSynchronize(stopEvent)); checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent)); double *gpuResult = h_A; printf("Time of shared memory version (ms): %f\n", ms); printf("(including data transfer and memory allocation in GPU.)\n"); gflop = (xyz * 1e-9) * 7.0 * timesteps; gflop_per_sec = gflop * 1e3 / ms; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); // Run the CPU version //double startTime = rtclock(); double *tmp1; for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_A1, h_B1, fac); tmp1 = h_A1; h_A1 = h_B1; h_B1 = tmp1; } double *cpuResult; if ((timesteps%2) == 0) cpuResult = h_B1; else cpuResult = h_A1; /*double endTime = rtclock(); double elapsedTimeC = endTime - startTime; printf("Elapsed Time:%lf\n", elapsedTimeC); flops = xyz * 7.0 * timesteps; gflops = flops / elapsedTimeC / 1e9; printf("(CPU) %lf GFlop/s\n", gflops); */ // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; i = 0; for (; i < xyz; ++i){ diff = cpuResult[i] - gpuResult[i]; errorNorm += diff * diff; refNorm += cpuResult[i] * cpuResult[i]; } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) 
{ printf("Correctness, FAILED\n"); } else { printf("Correctness, PASSED\n"); } printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]); printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]); printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]); printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]); printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]); printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]); // cleanup checkCuda( hipEventDestroy(startEvent)); checkCuda( hipEventDestroy(stopEvent)); hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_A1); hipHostFree(h_B1); hipFree(d_A); hipFree(d_B); return 0; }
5f1d451ff989d0d4eee10d790275ade5dcc69a17.cu
//jacobi7.cu #include <cuda.h> #include <stdio.h> #include <jacobi7_cuda_shared_double.h> #include <jacobi7_double.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } void initial_data(double *h_A, double *h_B, const int xyz){ // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_A[i] = 1 + (double)rand() / (double)RAND_MAX; h_B[i] = h_A[i]; } } int main(int argc, char* *argv){ if(argc != 7) { printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int nx = atoi(argv[1]); const int ny = atoi(argv[2]); const int nz = atoi(argv[3]); const int tx = atoi(argv[4]); const int ty = atoi(argv[5]); const int timesteps = atoi(argv[6]); const int xyz = nx * ny * nz; const int xyz_bytes = xyz * sizeof(double); double *h_A, *h_A1; double *h_B, *h_B1; double *d_A; double *d_B; int devId = 0; cudaDeviceProp prop; checkCuda( cudaGetDeviceProperties(&prop, devId)); printf("Device : %s\n", prop.name); checkCuda( cudaSetDevice(devId)); // Allocate host buffers checkCuda(cudaMallocHost((void**)&h_A, xyz_bytes)); // host pinned checkCuda(cudaMallocHost((void**)&h_B, xyz_bytes)); // for comparison btw CPU and GPU version checkCuda(cudaMallocHost((void**)&h_A1, xyz_bytes)); checkCuda(cudaMallocHost((void**)&h_B1, xyz_bytes)); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_A[i] = 1 + (double)rand() / (double)RAND_MAX; h_A1[i] = h_B1[i] = h_B[i] = h_A[i]; } // A simple comparison of the result int testIndex = 3 + 3*nx+ 3*nx*ny; printf("Iniatialized data[%d]=%f\n", testIndex , h_A[testIndex]); 
printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]); printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]); const double fac = 6.0/(h_A[0] * h_A[0]); double *tmp; dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty); dim3 block(tx, ty); printf("grid:(%d, %d)\n", grid.x, grid.y); printf("block:(%d, %d)\n", tx, ty); float ms, ms1; // elapsed time in milliseconds printf("Start computing...\n"); /* set the ratio of cache/shared memory cudaFuncCachePreferNone: Default function cache configuration, no preference cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));*/ //const int sharedMemSize = (block.x + 2) * (block.y + 2) * sizeof(double); // create events and streams cudaEvent_t startEvent, stopEvent, startEvent1, stopEvent1; checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); checkCuda( cudaEventCreate(&startEvent1)); checkCuda( cudaEventCreate(&stopEvent1)); // timing start include data transfer and memory allocation checkCuda( cudaEventRecord(startEvent,0) ); // Allocate device buffers checkCuda(cudaMalloc((void**)&d_A, xyz_bytes)); // device checkCuda(cudaMalloc((void**)&d_B, xyz_bytes)); double* input = d_A; double* output = d_B; // copy data to device checkCuda( cudaMemcpy(d_A, h_A, xyz_bytes, cudaMemcpyHostToDevice)); checkCuda( cudaMemcpy(d_B, d_A, xyz_bytes, cudaMemcpyDeviceToDevice)); // timing start pure gpu computing checkCuda( cudaEventRecord(startEvent1, 0)); // Run the GPU kernel for(int t = 0; t < timesteps; t += 1) { jacobi3d_7p_shmem_adam_reg<<<grid, block>>>(input, output, nx, ny, nz, fac); // swap input and output tmp = input; input = output; output = tmp; } // timing end pure gpu computing checkCuda( cudaEventRecord(stopEvent1, 0)); checkCuda( cudaEventSynchronize(stopEvent1)); checkCuda( cudaEventElapsedTime(&ms1, startEvent1, stopEvent1)); printf("Time of register version (pure 
GPU) (ms): %f\n", ms1); double gflop = (xyz * 1e-9) * 7.0 * timesteps; double gflop_per_sec = gflop * 1e3 / ms1; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms1; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); if(timesteps%2==0) checkCuda( cudaMemcpy(h_A, output, xyz_bytes, cudaMemcpyDeviceToHost)); else checkCuda( cudaMemcpy(h_A, input, xyz_bytes, cudaMemcpyDeviceToHost)); checkCuda( cudaEventRecord(stopEvent, 0)); checkCuda( cudaEventSynchronize(stopEvent)); checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent)); double *gpuResult = h_A; printf("Time of shared memory version (ms): %f\n", ms); printf("(including data transfer and memory allocation in GPU.)\n"); gflop = (xyz * 1e-9) * 7.0 * timesteps; gflop_per_sec = gflop * 1e3 / ms; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / ms; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); // Run the CPU version //double startTime = rtclock(); double *tmp1; for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_A1, h_B1, fac); tmp1 = h_A1; h_A1 = h_B1; h_B1 = tmp1; } double *cpuResult; if ((timesteps%2) == 0) cpuResult = h_B1; else cpuResult = h_A1; /*double endTime = rtclock(); double elapsedTimeC = endTime - startTime; printf("Elapsed Time:%lf\n", elapsedTimeC); flops = xyz * 7.0 * timesteps; gflops = flops / elapsedTimeC / 1e9; printf("(CPU) %lf GFlop/s\n", gflops); */ // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; i = 0; for (; i < xyz; ++i){ diff = cpuResult[i] - gpuResult[i]; errorNorm += diff * diff; refNorm += cpuResult[i] * cpuResult[i]; } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) { printf("Correctness, FAILED\n"); } else { 
printf("Correctness, PASSED\n"); } printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]); printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]); printf("h_A[%d]=%f\n", testIndex, h_A[testIndex]); printf("h_B[%d]=%f\n", testIndex, h_B[testIndex]); printf("h_A1[%d]=%f\n", testIndex, h_A1[testIndex]); printf("h_B1[%d]=%f\n", testIndex, h_B1[testIndex]); // cleanup checkCuda( cudaEventDestroy(startEvent)); checkCuda( cudaEventDestroy(stopEvent)); cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_A1); cudaFreeHost(h_B1); cudaFree(d_A); cudaFree(d_B); return 0; }
8d8c988206462097f924660231beebadd5af9c61.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------- Contact: tigre.toolbox@gmail.com Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "voxel_backprojection.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | | / /| | A Z | / / |*D | | | +--------+ | | | | | | | | | | | *O | + | *--->y | | | / | / | | |/ | V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, hipTextureType3D , hipReadModeElementType> tex; __global__ void FDKweigths(const Geometry geo,float* image,float constant){ size_t idx = threadIdx.x + blockIdx.x * blockDim.x; for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) { image[idx]*=constant; } } __global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image, const int indAlpha, const Point3D deltaX , const Point3D deltaY, const Point3D deltaZ, const Point3D xyzOrigin, const Point3D xyzOffset, const Point3D uv0Offset){ int indY = blockIdx.y * blockDim.y + threadIdx.y; int indX = blockIdx.x * blockDim.x + threadIdx.x; int indZ = blockIdx.z * blockDim.z + threadIdx.z; //Make sure we dont go out of bounds size_t idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ) return; // Geometric trasnformations: //Source, scaled XYZ coordinates Point3D S; S.x=geo.DSO; // we dont scale the 
x direction, because the detecros is only in YZ (and the image is rotated) S.y=-uv0Offset.x/geo.dDetecU; S.z=-uv0Offset.y/geo.dDetecV; // "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-geo.COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DDO*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2-0.5; v=z+geo.nDetecV/2-0.5; float weigth; float realx,realy; realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+geo.COR; weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; weigth=1/(weigth*weigth); // Get Value in the computed (U,V) and multiply by the corresponding weigth. 
image[idx]+=tex3D(tex, u +0.5 , v +0.5 , indAlpha+0.5) *weigth; } int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas,int nalpha){ /* * Allocate texture memory on the device */ // copy data to CUDA memory hipArray *d_projectiondata = 0; const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nalpha); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_projectiondata, &channelDesc, extent); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; hipMalloc((void**)&dimage, num_bytes); hipMemset(dimage,0,num_bytes); cudaCheckErrors("hipMalloc fail"); // If we are going to time bool timekernel=false; hipEvent_t start, stop; float elapsedTime; if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } int divx,divy,divz; //enpirical divx=32; divy=32; divz=1; dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,divz); Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec; for (int i=0;i<nalpha;i++){ geo.alpha=-alphas[i]; computeDeltasCube(geo,geo.alpha,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ); offOrig.x=geo.offOrigX[i]; offOrig.y=geo.offOrigY[i]; 
offDetec.x=geo.offDetecU[i]; offDetec.y=geo.offDetecV[i]; hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block), 0, 0, geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec); cudaCheckErrors("Kernel fail"); } if (timekernel){ hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy result fail"); hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dimage); hipFreeArray(d_projectiondata); cudaCheckErrors("hipFree d_imagedata fail"); //hipDeviceReset(); return 0; } void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ){ Point3D P0, Px0,Py0,Pz0; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! 
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; P.z =P.z-geo.offDetecV[i]/geo.dDetecV; P.y =P.y-geo.offDetecU[i]/geo.dDetecU; *xyzorigin=P; }
8d8c988206462097f924660231beebadd5af9c61.cu
/*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri --------------------------------------------------------------------------- --------------------------------------------------------------------------- Copyright (c) 2015, University of Bath and CERN- European Organization for Nuclear Research All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------- Contact: tigre.toolbox@gmail.com Codes : https://github.com/CERN/TIGRE --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "voxel_backprojection.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | | / /| | A Z | / / |*D | | | +--------+ | | | | | | | | | | | *O | + | *--->y | | | / | / | | |/ | V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, cudaTextureType3D , cudaReadModeElementType> tex; __global__ void FDKweigths(const Geometry geo,float* image,float constant){ size_t idx = threadIdx.x + blockIdx.x * blockDim.x; for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) { image[idx]*=constant; } } __global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image, const int indAlpha, const Point3D deltaX , const Point3D deltaY, const Point3D deltaZ, const Point3D xyzOrigin, const Point3D xyzOffset, const Point3D uv0Offset){ int indY = blockIdx.y * blockDim.y + threadIdx.y; int indX = blockIdx.x * blockDim.x + threadIdx.x; int indZ = blockIdx.z * blockDim.z + threadIdx.z; //Make sure we dont go out of bounds size_t idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ) return; // Geometric trasnformations: //Source, scaled XYZ coordinates Point3D S; S.x=geo.DSO; // we dont scale the x 
direction, because the detecros is only in YZ (and the image is rotated) S.y=-uv0Offset.x/geo.dDetecU; S.z=-uv0Offset.y/geo.dDetecV; // "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-geo.COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DDO*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2-0.5; v=z+geo.nDetecV/2-0.5; float weigth; float realx,realy; realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+geo.COR; weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; weigth=1/(weigth*weigth); // Get Value in the computed (U,V) and multiply by the corresponding weigth. 
image[idx]+=tex3D(tex, u +0.5 , v +0.5 , indAlpha+0.5) *weigth; } int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas,int nalpha){ /* * Allocate texture memory on the device */ // copy data to CUDA memory cudaArray *d_projectiondata = 0; const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nalpha); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; cudaMalloc((void**)&dimage, num_bytes); cudaMemset(dimage,0,num_bytes); cudaCheckErrors("cudaMalloc fail"); // If we are going to time bool timekernel=false; cudaEvent_t start, stop; float elapsedTime; if (timekernel){ cudaEventCreate(&start); cudaEventRecord(start,0); } int divx,divy,divz; //enpirical divx=32; divy=32; divz=1; dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,divz); Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec; for (int i=0;i<nalpha;i++){ geo.alpha=-alphas[i]; computeDeltasCube(geo,geo.alpha,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ); offOrig.x=geo.offOrigX[i]; 
offOrig.y=geo.offOrigY[i]; offDetec.x=geo.offDetecU[i]; offDetec.y=geo.offDetecV[i]; kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec); cudaCheckErrors("Kernel fail"); } if (timekernel){ cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy result fail"); cudaUnbindTexture(tex); cudaCheckErrors("Unbind fail"); cudaFree(dimage); cudaFreeArray(d_projectiondata); cudaCheckErrors("cudaFree d_imagedata fail"); //cudaDeviceReset(); return 0; } void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ){ Point3D P0, Px0,Py0,Pz0; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! 
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; P.z =P.z-geo.offDetecV[i]/geo.dDetecV; P.y =P.y-geo.offDetecU[i]/geo.dDetecU; *xyzorigin=P; }
c054cb263b9ab272449b9a534a94d749e70b0c3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> extern"C"{ #include <ppm.h> } #include <sys/time.h> // Super macro de mesure du temps // A NE PAS MODIFIER #define TIME(fun) \ do { struct timeval t1, t2; \ gettimeofday(&t1, 0); \ fun; \ gettimeofday(&t2, 0); \ double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000000.0; \ printf("%s, elapsed time : \033[31;01m%g\033[00m\n", #fun , time); \ } while (0) // A NE PAS MODIFIER pixel * readppm (const char* fname, int* cols, int* rows, pixval* maxval, int* format){ FILE* img_in; img_in = fopen(fname, "r"); ppm_readppminit(img_in, cols, rows, maxval, format); pixel* out = (pixel*)malloc ((*cols)*(*rows)*sizeof(pixel)); int i; for (i =0; i < *rows; i++){ ppm_readppmrow(img_in, out+(i*(*cols)), *cols, *maxval, *format); } return out; } // A NE PAS MODIFIER void writeppm(const char* fname, pixel* out, int cols, int rows, pixval maxval, int format){ FILE* img_out; img_out = fopen(fname, "w+"); int i; ppm_writeppminit(img_out, cols, rows, maxval, format); for (i =0; i < rows; i++){ ppm_writeppmrow(img_out, out+(i*(cols)), cols, maxval, 1); } } /********** A partir d'ici c'est vous de jouer **********/ /* On commence doucement, sans textures! 
Vous pouvez vous appuyer sur l'exercice du TD prcdent (correction sur Celene) */ // Noyau qui floute une image en utilisant un masque de "rayon" paramtrable __global__ void blur(pixel* img_in, pixel* img_out, int cols, int rows, int ray){ // rcupration des indices globaux dans la grille 2D pour les // dimensions X et Y int y = blockIdx.y*blockDim.y + threadIdx.y; int x = blockIdx.x*blockDim.x + threadIdx.x; // on s'assure de ne pas sortir des limites de l'image if (x < cols && y < rows){ int idx, sommeR, sommeG, sommeB; int cpt = 0; sommeR=0; sommeG=0; sommeB=0; pixel pix; for(int xInd = max(0,x-ray); xInd < min(rows, x+ray); xInd++){ for(int yInd = max(0, y-ray); yInd < min(cols, y + ray); yInd++){ cpt++; idx = xInd*rows+yInd; pix = img_in[idx]; sommeR += pix.r; sommeG += pix.g; sommeB += pix.b; } } idx = x*rows+y; img_out[idx].r = sommeR/cpt; img_out[idx].g = sommeG/cpt; img_out[idx].b = sommeB/cpt; // En tant normal, lorsque on s'appuie sur une bibliothques avec // des types abstraits (ici le type pixel), on utilise les // fonctions ou macros de la bibliothques pour manipuler ce type, // par exemple PPM_ASSIGN(img_out[y*cols+x], gray, gray, gray); } } // Fonction CPU qui utilise le noyau blur void blur (pixel* ppm_in, pixel* ppm_out, size_t size, int cols, int rows, pixval maxval, int ray) { pixel *d_ppm_in, *d_ppm_out = NULL; dim3 DimBlock(16, 16,1); dim3 DimGrid((rows + DimBlock.x -1)/DimBlock.x, (cols + DimBlock.y -1)/DimBlock.y, 1); hipMalloc(&d_ppm_in, size); hipMalloc(&d_ppm_out, size); hipMemcpy(d_ppm_in, ppm_in, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( blur), dim3(DimGrid), dim3(DimBlock), 0, 0, d_ppm_in, d_ppm_out, cols, rows, ray); hipMemcpy(ppm_out, d_ppm_out, size, hipMemcpyDeviceToHost); hipFree(d_ppm_in); hipFree(d_ppm_out); } /* Quand la version "classique" fonctionne, vous pouvez passer l'utilisation de textures */ // Texture globale (pas besoin de la passer en paramre d'un kernel) // 2D contenant des unsigned int // A NE PAS MODIFIER 
texture<unsigned int, 2, hipReadModeElementType> tex; // Mme chose qu'avant mais avec une texture. // // Vous utiliserez tex2D(texture, x, y) // pour lire une valeur dans la texture __global__ void blur_tex(pixel* img_out, int cols, int rows, int ray){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x < cols && y < rows){ int i=0, j=0; int r=0, g=0, b=0; int pixelsATraiter = (2*ray+1)*(2*ray+1); for(i=-ray; i<=ray; i++){ for(j=-ray*3; j<=ray*3; j+=3){ r+= tex2D(tex, (3*x+j), (y+i)); g+= tex2D(tex, (3*x+j+1), (y+i)); b+= tex2D(tex, (3*x+j+2), (y+i)); } } r/=pixelsATraiter;g/=pixelsATraiter;b/=pixelsATraiter; PPM_ASSIGN(img_out[y*cols+x ], r, g, b); } } /* Fonction qui lance le noyau blur_tex Pour rappel, pour utiliser une texture, il faut : + dcrire un cudaChennelFormatDesc (ici, on associe notre texture de simples unsigned int, on pourra donc utiliser le code suivant : hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindUnsigned); + il faut spcifier les proprits de notre texture, en particulier: ++ son mode de gestion des frontires : tex.adressMode[0] pour la dimension 1 tex.adressMode[1] pour la dimension 2 ici, on utilisera le mode Wrap (voir API CUDA) ++ son mode de filtrage, filterMode, ici on restera sur un mode "non" filtr, en utilisant hipFilterModePoint ++ si la texture est normalise ou non (ici, non) + on doit associer la texture un hipArray, pour cela il faut : ++ allouer un hipArray (voir API CUDA) ++ copier les donnes dans le hipArray sur le GPU (voir API CUDA) ++ lier la texture au hipArray avec hipBindTextureToArray (voir API CUDA) N'oubliez pas de lancer le kernel, allouer la mmoire pour le rsultat etc */ void blur_tex (pixel* ppm_in, pixel* ppm_out, size_t size, int cols, int rows, pixval maxval, int ray) { pixel* d_ppm_out; hipMalloc(&d_ppm_out, size); hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0, hipChannelFormatKindUnsigned); hipArray* cuArray; 
hipMallocArray(&cuArray, &channelDesc, cols*3, rows); hipMemcpyToArray(cuArray, 0, 0, ppm_in, sizeof(unsigned int)*3*cols*rows, hipMemcpyHostToDevice); tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModePoint; tex.normalized = false; hipBindTextureToArray(tex, cuArray, channelDesc); dim3 DimBlock(16, 16, 1); dim3 DimGrid((rows + DimBlock.x-1)/DimBlock.x, (cols + DimBlock.y -1)/DimBlock.y, 1); hipLaunchKernelGGL(( blur_tex), dim3(DimGrid), dim3(DimBlock), 0, 0, d_ppm_out, cols, rows, ray); hipMemcpy(ppm_out, d_ppm_out, size, hipMemcpyDeviceToHost); hipFree(d_ppm_out); hipUnbindTexture(tex); hipFree(cuArray); } /*********** FIN DU TRAVAIL POUR VOUS **********/ // A NE PAS MODIFIER // le programme utilisera lena.ppm par dfaut mais // vous pouvez lui passer une image ppm (sans l'extension) en // paramtre pour l'essayer sur d'autres images. // par exemple : // ./blur mandril int main(int argc, char* argv[]){ char* name = (argc <= 1)?((char*)"lena"):argv[1]; char *in = (char*)malloc(sizeof(char)); in = strcat(strcat(in,name), ".ppm"); char *out = (char*)malloc(sizeof(char)); out = strcat(strcat(out,name), "_blur.ppm"); char *out_tex = (char*)malloc(sizeof(char)); out_tex = strcat(strcat(out_tex,name), "_blur_tex.ppm"); printf("Will work on %s and generate %s and %s\n", in, out, out_tex); pixel *ppm_in, *ppm_out = NULL; int cols, rows; pixval maxval; int format; ppm_in = readppm(in, &cols, &rows, &maxval, &format); long size = cols*rows*sizeof(pixel); ppm_out = (pixel*)malloc(size); int ray = 10; TIME(blur(ppm_in, ppm_out, size, cols, rows, maxval, ray)); writeppm(out, ppm_out, cols, rows, maxval, 1); TIME(blur_tex(ppm_in, ppm_out, size, cols, rows, maxval, ray)); writeppm(out_tex, ppm_out, cols, rows, maxval, 1); free(ppm_in); free(ppm_out); return 0; }
c054cb263b9ab272449b9a534a94d749e70b0c3c.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> extern"C"{ #include <ppm.h> } #include <sys/time.h> // Super macro de mesure du temps // A NE PAS MODIFIER #define TIME(fun) \ do { struct timeval t1, t2; \ gettimeofday(&t1, 0); \ fun; \ gettimeofday(&t2, 0); \ double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000000.0; \ printf("%s, elapsed time : \033[31;01m%g\033[00m\n", #fun , time); \ } while (0) // A NE PAS MODIFIER pixel * readppm (const char* fname, int* cols, int* rows, pixval* maxval, int* format){ FILE* img_in; img_in = fopen(fname, "r"); ppm_readppminit(img_in, cols, rows, maxval, format); pixel* out = (pixel*)malloc ((*cols)*(*rows)*sizeof(pixel)); int i; for (i =0; i < *rows; i++){ ppm_readppmrow(img_in, out+(i*(*cols)), *cols, *maxval, *format); } return out; } // A NE PAS MODIFIER void writeppm(const char* fname, pixel* out, int cols, int rows, pixval maxval, int format){ FILE* img_out; img_out = fopen(fname, "w+"); int i; ppm_writeppminit(img_out, cols, rows, maxval, format); for (i =0; i < rows; i++){ ppm_writeppmrow(img_out, out+(i*(cols)), cols, maxval, 1); } } /********** A partir d'ici c'est à vous de jouer **********/ /* On commence doucement, sans textures! 
Vous pouvez vous appuyer sur l'exercice du TD précédent (correction sur Celene) */ // Noyau qui floute une image en utilisant un masque de "rayon" paramétrable __global__ void blur(pixel* img_in, pixel* img_out, int cols, int rows, int ray){ // récupération des indices globaux dans la grille 2D pour les // dimensions X et Y int y = blockIdx.y*blockDim.y + threadIdx.y; int x = blockIdx.x*blockDim.x + threadIdx.x; // on s'assure de ne pas sortir des limites de l'image if (x < cols && y < rows){ int idx, sommeR, sommeG, sommeB; int cpt = 0; sommeR=0; sommeG=0; sommeB=0; pixel pix; for(int xInd = max(0,x-ray); xInd < min(rows, x+ray); xInd++){ for(int yInd = max(0, y-ray); yInd < min(cols, y + ray); yInd++){ cpt++; idx = xInd*rows+yInd; pix = img_in[idx]; sommeR += pix.r; sommeG += pix.g; sommeB += pix.b; } } idx = x*rows+y; img_out[idx].r = sommeR/cpt; img_out[idx].g = sommeG/cpt; img_out[idx].b = sommeB/cpt; // En tant normal, lorsque on s'appuie sur une bibliothèques avec // des types abstraits (ici le type pixel), on utilise les // fonctions ou macros de la bibliothèques pour manipuler ce type, // par exemple PPM_ASSIGN(img_out[y*cols+x], gray, gray, gray); } } // Fonction CPU qui utilise le noyau blur void blur (pixel* ppm_in, pixel* ppm_out, size_t size, int cols, int rows, pixval maxval, int ray) { pixel *d_ppm_in, *d_ppm_out = NULL; dim3 DimBlock(16, 16,1); dim3 DimGrid((rows + DimBlock.x -1)/DimBlock.x, (cols + DimBlock.y -1)/DimBlock.y, 1); cudaMalloc(&d_ppm_in, size); cudaMalloc(&d_ppm_out, size); cudaMemcpy(d_ppm_in, ppm_in, size, cudaMemcpyHostToDevice); blur<<<DimGrid, DimBlock>>>(d_ppm_in, d_ppm_out, cols, rows, ray); cudaMemcpy(ppm_out, d_ppm_out, size, cudaMemcpyDeviceToHost); cudaFree(d_ppm_in); cudaFree(d_ppm_out); } /* Quand la version "classique" fonctionne, vous pouvez passer à l'utilisation de textures */ // Texture globale (pas besoin de la passer en paramère d'un kernel) // 2D contenant des unsigned int // A NE PAS MODIFIER texture<unsigned 
int, 2, cudaReadModeElementType> tex; // Même chose qu'avant mais avec une texture. // // Vous utiliserez tex2D(texture, x, y) // pour lire une valeur dans la texture __global__ void blur_tex(pixel* img_out, int cols, int rows, int ray){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x < cols && y < rows){ int i=0, j=0; int r=0, g=0, b=0; int pixelsATraiter = (2*ray+1)*(2*ray+1); for(i=-ray; i<=ray; i++){ for(j=-ray*3; j<=ray*3; j+=3){ r+= tex2D(tex, (3*x+j), (y+i)); g+= tex2D(tex, (3*x+j+1), (y+i)); b+= tex2D(tex, (3*x+j+2), (y+i)); } } r/=pixelsATraiter;g/=pixelsATraiter;b/=pixelsATraiter; PPM_ASSIGN(img_out[y*cols+x ], r, g, b); } } /* Fonction qui lance le noyau blur_tex Pour rappel, pour utiliser une texture, il faut : + décrire un cudaChennelFormatDesc (ici, on associe notre texture à de simples unsigned int, on pourra donc utiliser le code suivant : cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindUnsigned); + il faut spécifier les propriétés de notre texture, en particulier: ++ son mode de gestion des frontières : tex.adressMode[0] pour la dimension 1 tex.adressMode[1] pour la dimension 2 ici, on utilisera le mode Wrap (voir API CUDA) ++ son mode de filtrage, filterMode, ici on restera sur un mode "non" filtré, en utilisant cudaFilterModePoint ++ si la texture est normalisée ou non (ici, non) + on doit associer la texture à un cudaArray, pour cela il faut : ++ allouer un cudaArray (voir API CUDA) ++ copier les données dans le cudaArray sur le GPU (voir API CUDA) ++ lier la texture au cudaArray avec cudaBindTextureToArray (voir API CUDA) N'oubliez pas de lancer le kernel, allouer la mémoire pour le résultat etc */ void blur_tex (pixel* ppm_in, pixel* ppm_out, size_t size, int cols, int rows, pixval maxval, int ray) { pixel* d_ppm_out; cudaMalloc(&d_ppm_out, size); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0, cudaChannelFormatKindUnsigned); cudaArray* 
cuArray; cudaMallocArray(&cuArray, &channelDesc, cols*3, rows); cudaMemcpyToArray(cuArray, 0, 0, ppm_in, sizeof(unsigned int)*3*cols*rows, cudaMemcpyHostToDevice); tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModePoint; tex.normalized = false; cudaBindTextureToArray(tex, cuArray, channelDesc); dim3 DimBlock(16, 16, 1); dim3 DimGrid((rows + DimBlock.x-1)/DimBlock.x, (cols + DimBlock.y -1)/DimBlock.y, 1); blur_tex<<<DimGrid, DimBlock>>>(d_ppm_out, cols, rows, ray); cudaMemcpy(ppm_out, d_ppm_out, size, cudaMemcpyDeviceToHost); cudaFree(d_ppm_out); cudaUnbindTexture(tex); cudaFree(cuArray); } /*********** FIN DU TRAVAIL POUR VOUS **********/ // A NE PAS MODIFIER // le programme utilisera lena.ppm par défaut mais // vous pouvez lui passer une image ppm (sans l'extension) en // paramètre pour l'essayer sur d'autres images. // par exemple : // ./blur mandril int main(int argc, char* argv[]){ char* name = (argc <= 1)?((char*)"lena"):argv[1]; char *in = (char*)malloc(sizeof(char)); in = strcat(strcat(in,name), ".ppm"); char *out = (char*)malloc(sizeof(char)); out = strcat(strcat(out,name), "_blur.ppm"); char *out_tex = (char*)malloc(sizeof(char)); out_tex = strcat(strcat(out_tex,name), "_blur_tex.ppm"); printf("Will work on %s and generate %s and %s\n", in, out, out_tex); pixel *ppm_in, *ppm_out = NULL; int cols, rows; pixval maxval; int format; ppm_in = readppm(in, &cols, &rows, &maxval, &format); long size = cols*rows*sizeof(pixel); ppm_out = (pixel*)malloc(size); int ray = 10; TIME(blur(ppm_in, ppm_out, size, cols, rows, maxval, ray)); writeppm(out, ppm_out, cols, rows, maxval, 1); TIME(blur_tex(ppm_in, ppm_out, size, cols, rows, maxval, ray)); writeppm(out_tex, ppm_out, cols, rows, maxval, 1); free(ppm_in); free(ppm_out); return 0; }
5a510225fe2259d2e7259ff789169020ae849d36.hip
// !!! This is a file automatically generated by hipify!!! //ye nhi chla sort #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Lock.h" __global__ void addKernel(int *c) { Lock ml; ml.lock(); int i = threadIdx.x; int j = threadIdx.y; if ((i == j+1 && c[i] < c[j])|| (j == i+1 && c[i] > c[j])) { //exchange c[i] and c[j] //printf("c[i] : %d -- c[j] : %d\n", c[i], c[j]); //#if __CUDA_ARCH__ >= 200 int tempi = c[i]; int tempj = c[j]; c[i] = tempj; c[j] = tempi; //printf("i %d : j %d\n", i, j); //int z1 = atomicExch(&c[i], tempj); //int z2 = atomicExch(&c[j], tempi); //for (int i = 0; i < 5; i++) //{ // printf("%d ", c[i]); //} //printf("\n"); //#endif } ml.unlock(); } hipError_t addWithCuda(int *a, unsigned int size) { int *dev_a = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); dim3 threadsPerBlock(size, size); addKernel << <1, threadsPerBlock >> >(dev_a); cudaStatus = hipGetLastError(); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(a, dev_a, size * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_a); return cudaStatus; } int main() { const int arraySize = 5; int a[arraySize] = {5,4,3,2,1}; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(a,arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("this is the sorted array = {%d,%d,%d,%d,%d}\n", a[0], a[1], a[2], a[3], a[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel.
5a510225fe2259d2e7259ff789169020ae849d36.cu
//ye nhi chla sort #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Lock.h" __global__ void addKernel(int *c) { Lock ml; ml.lock(); int i = threadIdx.x; int j = threadIdx.y; if ((i == j+1 && c[i] < c[j])|| (j == i+1 && c[i] > c[j])) { //exchange c[i] and c[j] //printf("c[i] : %d -- c[j] : %d\n", c[i], c[j]); //#if __CUDA_ARCH__ >= 200 int tempi = c[i]; int tempj = c[j]; c[i] = tempj; c[j] = tempi; //printf("i %d : j %d\n", i, j); //int z1 = atomicExch(&c[i], tempj); //int z2 = atomicExch(&c[j], tempi); //for (int i = 0; i < 5; i++) //{ // printf("%d ", c[i]); //} //printf("\n"); //#endif } ml.unlock(); } cudaError_t addWithCuda(int *a, unsigned int size) { int *dev_a = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); dim3 threadsPerBlock(size, size); addKernel << <1, threadsPerBlock >> >(dev_a); cudaStatus = cudaGetLastError(); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(a, dev_a, size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_a); return cudaStatus; } int main() { const int arraySize = 5; int a[arraySize] = {5,4,3,2,1}; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(a,arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("this is the sorted array = {%d,%d,%d,%d,%d}\n", a[0], a[1], a[2], a[3], a[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel.
1c8d2319ce5100419d5d057e4e69a731d5f8a4fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<string.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image/stb_image_write.h" #include"error_check.h" #include"time_helper.h" #include<iostream> using namespace std; void rgb_to_blur_cpu(unsigned char *input_image, unsigned char *output_image, int width, int height, int channels, int BLUR_SIZE){ for(int row=0; row<height; row++){ for(int col=0; col<width; col++){ int pixVal1 = 0; int pixVal2 = 0; int pixVal3 = 0; int pixels = 0; int offset = (row * width + col)*channels; for(int blurrow = -BLUR_SIZE; blurrow <= BLUR_SIZE; ++blurrow){ for(int blurcol = -BLUR_SIZE; blurcol <= BLUR_SIZE; ++blurcol){ int currow = row + blurrow; int curcol = col + blurcol; if(currow > -1 && currow < height && curcol > -1 && curcol < width){ pixVal1 += input_image[(currow * width + curcol) * channels]; pixVal2 += input_image[(currow * width + curcol) * channels + 1]; pixVal3 += input_image[(currow * width + curcol) * channels + 2]; pixels++; } } } *(output_image + offset) = (unsigned char)(pixVal1 / pixels); *(output_image + offset + 1) = (unsigned char)(pixVal2 / pixels); *(output_image + offset + 2) = (unsigned char)(pixVal3 / pixels); if(channels==4) { *(output_image + offset + 3) = input_image[offset + 3]; } } } } __global__ void blur_gpu(unsigned char *input_image, unsigned char *output_image, int width, int height, int channels, int blur_size) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if(Col < width && Row < height){ int pixValr = 0, pixValg = 0, pixValb = 0, pixels = 0; for(int i = -blur_size; i <= blur_size; i++){ for(int j = -blur_size; j <= blur_size; j++){ int curRow = Row + i; int curCol = Col + j; int offset = (curRow*width+curCol)*channels; if(curRow >= 0 && curRow < height && curCol >=0 && curCol < width){ 
pixValr += input_image[offset]; pixValg += input_image[offset+1]; pixValb += input_image[offset+2]; pixels++; } } } output_image[(Row * width + Col)*channels] = (unsigned char)(pixValr/pixels); output_image[(Row * width + Col)*channels+1] = (unsigned char)(pixValg/pixels); output_image[(Row * width + Col)*channels+2] = (unsigned char)(pixValb/pixels); } } int main(int argc, char *argv[]) { if(argc<6) { printf("Usage: command input-image-name output-image-name cpu/gpu? channels blursize"); return -1; } char *input_image_name = argv[1]; char *output_image_name = argv[2]; char *option = argv[3]; char *channel = argv[4]; char *bsize = argv[5]; int blur_size = 0; for(int i = 0; i < strlen(bsize); i++){ blur_size *= 10; blur_size += *(bsize+i)-'0'; } int desired_no_channels = *channel-'0'; int width, height, original_no_channels; unsigned char *input_img = stbi_load(input_image_name, &width, &height, &original_no_channels, desired_no_channels); if(input_img==NULL){ printf("Error in loading the image.\n"); exit(1);} printf("Loaded image with a width of %dpx, a height of %dpx. The original image had %d channels, the loaded image has %d channels.\n", width, height, original_no_channels, desired_no_channels); int channels = original_no_channels; int img_mem_size = width * height * channels * sizeof(char); double begin; if(strcmp(option, "cpu")==0) { printf("Processing with CPU!\n"); unsigned char *sepia_img = (unsigned char *)malloc(img_mem_size); if(sepia_img==NULL){ printf("Unable to allocate memory for the sepia image. 
\n"); exit(1); } begin = cpuSecond(); rgb_to_blur_cpu(input_img, sepia_img, width, height, channels, blur_size); printf("Time cost [CPU]:%f s\n", cpuSecond()-begin); stbi_write_jpg(output_image_name, width, height, channels, sepia_img, 100); free(sepia_img); } else if(strcmp(option, "gpu")==0) { printf("Processing with GPU!\n"); unsigned char *d_input_img, *d_output_img; hipMalloc((void**)&d_input_img, img_mem_size); hipMalloc((void**)&d_output_img, img_mem_size); unsigned char *output_img = (unsigned char *)malloc(img_mem_size); hipMemcpy(d_input_img, input_img, img_mem_size, hipMemcpyHostToDevice); dim3 block(64, 64, 1); dim3 grid((width-1)/block.x+1, (height-1)/block.y+1, 1); begin = cpuSecond(); hipLaunchKernelGGL(( blur_gpu), dim3(block), dim3(grid), 0, 0, d_input_img, d_output_img, width, height, channels, blur_size); CHECK(hipGetLastError()); CHECK(hipDeviceSynchronize()); printf("Time cost [GPU]:%f s\n", cpuSecond()-begin); hipMemcpy(output_img, d_output_img, img_mem_size, hipMemcpyDeviceToHost); stbi_write_jpg(output_image_name, width, height, channels, output_img, 100); free(output_img); hipFree(d_input_img); hipFree(d_output_img); } stbi_image_free(input_img); return 0; }
1c8d2319ce5100419d5d057e4e69a731d5f8a4fd.cu
#include<stdio.h> #include<stdlib.h> #include<string.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image/stb_image_write.h" #include"error_check.h" #include"time_helper.h" #include<iostream> using namespace std; void rgb_to_blur_cpu(unsigned char *input_image, unsigned char *output_image, int width, int height, int channels, int BLUR_SIZE){ for(int row=0; row<height; row++){ for(int col=0; col<width; col++){ int pixVal1 = 0; int pixVal2 = 0; int pixVal3 = 0; int pixels = 0; int offset = (row * width + col)*channels; for(int blurrow = -BLUR_SIZE; blurrow <= BLUR_SIZE; ++blurrow){ for(int blurcol = -BLUR_SIZE; blurcol <= BLUR_SIZE; ++blurcol){ int currow = row + blurrow; int curcol = col + blurcol; if(currow > -1 && currow < height && curcol > -1 && curcol < width){ pixVal1 += input_image[(currow * width + curcol) * channels]; pixVal2 += input_image[(currow * width + curcol) * channels + 1]; pixVal3 += input_image[(currow * width + curcol) * channels + 2]; pixels++; } } } *(output_image + offset) = (unsigned char)(pixVal1 / pixels); *(output_image + offset + 1) = (unsigned char)(pixVal2 / pixels); *(output_image + offset + 2) = (unsigned char)(pixVal3 / pixels); if(channels==4) { *(output_image + offset + 3) = input_image[offset + 3]; } } } } __global__ void blur_gpu(unsigned char *input_image, unsigned char *output_image, int width, int height, int channels, int blur_size) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if(Col < width && Row < height){ int pixValr = 0, pixValg = 0, pixValb = 0, pixels = 0; for(int i = -blur_size; i <= blur_size; i++){ for(int j = -blur_size; j <= blur_size; j++){ int curRow = Row + i; int curCol = Col + j; int offset = (curRow*width+curCol)*channels; if(curRow >= 0 && curRow < height && curCol >=0 && curCol < width){ pixValr += input_image[offset]; pixValg += input_image[offset+1]; pixValb += 
input_image[offset+2]; pixels++; } } } output_image[(Row * width + Col)*channels] = (unsigned char)(pixValr/pixels); output_image[(Row * width + Col)*channels+1] = (unsigned char)(pixValg/pixels); output_image[(Row * width + Col)*channels+2] = (unsigned char)(pixValb/pixels); } } int main(int argc, char *argv[]) { if(argc<6) { printf("Usage: command input-image-name output-image-name cpu/gpu? channels blursize"); return -1; } char *input_image_name = argv[1]; char *output_image_name = argv[2]; char *option = argv[3]; char *channel = argv[4]; char *bsize = argv[5]; int blur_size = 0; for(int i = 0; i < strlen(bsize); i++){ blur_size *= 10; blur_size += *(bsize+i)-'0'; } int desired_no_channels = *channel-'0'; int width, height, original_no_channels; unsigned char *input_img = stbi_load(input_image_name, &width, &height, &original_no_channels, desired_no_channels); if(input_img==NULL){ printf("Error in loading the image.\n"); exit(1);} printf("Loaded image with a width of %dpx, a height of %dpx. The original image had %d channels, the loaded image has %d channels.\n", width, height, original_no_channels, desired_no_channels); int channels = original_no_channels; int img_mem_size = width * height * channels * sizeof(char); double begin; if(strcmp(option, "cpu")==0) { printf("Processing with CPU!\n"); unsigned char *sepia_img = (unsigned char *)malloc(img_mem_size); if(sepia_img==NULL){ printf("Unable to allocate memory for the sepia image. 
\n"); exit(1); } begin = cpuSecond(); rgb_to_blur_cpu(input_img, sepia_img, width, height, channels, blur_size); printf("Time cost [CPU]:%f s\n", cpuSecond()-begin); stbi_write_jpg(output_image_name, width, height, channels, sepia_img, 100); free(sepia_img); } else if(strcmp(option, "gpu")==0) { printf("Processing with GPU!\n"); unsigned char *d_input_img, *d_output_img; cudaMalloc((void**)&d_input_img, img_mem_size); cudaMalloc((void**)&d_output_img, img_mem_size); unsigned char *output_img = (unsigned char *)malloc(img_mem_size); cudaMemcpy(d_input_img, input_img, img_mem_size, cudaMemcpyHostToDevice); dim3 block(64, 64, 1); dim3 grid((width-1)/block.x+1, (height-1)/block.y+1, 1); begin = cpuSecond(); blur_gpu<<<block, grid>>>(d_input_img, d_output_img, width, height, channels, blur_size); CHECK(cudaGetLastError()); CHECK(cudaDeviceSynchronize()); printf("Time cost [GPU]:%f s\n", cpuSecond()-begin); cudaMemcpy(output_img, d_output_img, img_mem_size, cudaMemcpyDeviceToHost); stbi_write_jpg(output_image_name, width, height, channels, output_img, 100); free(output_img); cudaFree(d_input_img); cudaFree(d_output_img); } stbi_image_free(input_img); return 0; }
03bb01578fb41d04b2f57994041f21ea47cf84ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Forward pass of softmax-with-loss, one CUDA_KERNEL_LOOP iteration per
// (sample, spatial-position) pair.  Each iteration reads its ground-truth
// label, writes -log(prob[label]) into loss[index] (the probability is
// clamped at FLT_MIN so log never sees 0), and sets counts[index] = 1.
// Positions whose label equals ignore_label_ contribute 0 loss / 0 count.
// When weight_by_label_freqs is set, each loss term is additionally scaled
// by the precomputed per-class factor in label_counts.
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label,
          const bool weight_by_label_freqs, const float* label_counts,
          Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;  // sample within the batch
    const int s = index % spatial_dim;  // spatial offset within the sample
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      if (weight_by_label_freqs) {
        loss[index] *= static_cast<Dtype>(label_counts[label_value]);
      }
      counts[index] = 1;
    }
  }
}

// Host-side forward: runs the internal softmax, launches the per-element
// loss kernel, reduces the losses, and normalizes the total.
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything, we use it here to avoid having
  // to allocate new GPU memory to accumulate intermediate results.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // Per-class weights are only fetched when weighting is enabled.
  const float* label_count_data =
      weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label,
      weight_by_label_freqs_, label_count_data , loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  // Sum the per-element losses into a single scalar.
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
                                                        valid_count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
  // Clear scratch memory to prevent interfering with backward (see #6202).
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}

// Backward pass: bottom_diff has been pre-filled with the softmax
// probabilities by the caller, so subtracting 1 at the true-label channel
// yields the softmax-cross-entropy gradient (prob - onehot).  Ignored
// positions have their entire channel column zeroed; with frequency
// weighting, every channel of the position is scaled by the class factor.
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label,
		  const bool weight_by_label_freqs, const float* label_counts,
		  Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      if (weight_by_label_freqs) {
        for (int c = 0; c < channels; ++c) {
          bottom_diff[n * dim + c * spatial_dim + s] *=
              static_cast<Dtype>(label_counts[label_value]);
        }
      }
      counts[index] = 1;
    }
  }
}

// Host-side backward: seeds bottom_diff with the cached probabilities,
// launches the gradient kernel, and scales by the normalized loss weight.
// Backprop to the label input (bottom[1]) is not supported.
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    // Start from the softmax output; the kernel subtracts the one-hot label.
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use to to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    const float* label_count_data =
        weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label,
        weight_by_label_freqs_, label_count_data, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] /
                              get_normalizer(normalization_, valid_count);
    caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
03bb01578fb41d04b2f57994041f21ea47cf84ad.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Forward pass of softmax-with-loss, one CUDA_KERNEL_LOOP iteration per
// (sample, spatial-position) pair: read the ground-truth label, write
// -log(prob[label]) into loss[index] (probability clamped at FLT_MIN so log
// never sees 0), and mark the element as counted.  Positions whose label
// equals ignore_label_ contribute 0 loss / 0 count.  With
// weight_by_label_freqs, each loss term is scaled by its class factor.
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label,
          const bool weight_by_label_freqs, const float* label_counts,
          Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;  // sample within the batch
    const int s = index % spatial_dim;  // spatial offset within the sample
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      if (weight_by_label_freqs) {
        loss[index] *= static_cast<Dtype>(label_counts[label_value]);
      }
      counts[index] = 1;
    }
  }
}

// Host-side forward: run the internal softmax, launch the per-element loss
// kernel, reduce the losses, and normalize the total.
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything, we use it here to avoid having
  // to allocate new GPU memory to accumulate intermediate results.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // Per-class weights are only fetched when weighting is enabled.
  const float* label_count_data =
      weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
  // NOLINT_NEXT_LINE(whitespace/operators)
  SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label,
      weight_by_label_freqs_, label_count_data , loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  // Sum the per-element losses into a single scalar.
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
                                                        valid_count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
  // Clear scratch memory to prevent interfering with backward (see #6202).
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}

// Backward pass: bottom_diff has been pre-filled with the softmax
// probabilities by the caller, so subtracting 1 at the true-label channel
// yields the softmax-cross-entropy gradient (prob - onehot).  Ignored
// positions have their entire channel column zeroed; with frequency
// weighting, every channel of the position is scaled by the class factor.
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label,
          const bool weight_by_label_freqs, const float* label_counts,
          Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      if (weight_by_label_freqs) {
        for (int c = 0; c < channels; ++c) {
          bottom_diff[n * dim + c * spatial_dim + s] *=
              static_cast<Dtype>(label_counts[label_value]);
        }
      }
      counts[index] = 1;
    }
  }
}

// Host-side backward: seeds bottom_diff with the cached probabilities,
// launches the gradient kernel, and scales by the normalized loss weight.
// Backprop to the label input (bottom[1]) is not supported.
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    // Start from the softmax output; the kernel subtracts the one-hot label.
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use to to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    const float* label_count_data =
        weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL;
    // NOLINT_NEXT_LINE(whitespace/operators)
    SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label,
        weight_by_label_freqs_, label_count_data, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] /
                              get_normalizer(normalization_, valid_count);
    caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
7de1d918ef4f4c465786b389467603a52887fa82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>

#include <gflags/gflags.h>

#include <groute/event_pool.h>
#include <groute/distributed_worklist.h>
#include <groute/worklist_stack.h>

#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <utils/markers.h>

#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/cta_work.h>

#include <utils/cuda_utils.h>

#include "bc_common.h"

DEFINE_int32(source_node, 0, "The source node for the BC traversal (clamped to [0, nnodes-1])");

// Sentinel level for "not yet visited".
const level_t INF = UINT_MAX;

#define GTID (blockIdx.x * blockDim.x + threadIdx.x)

namespace bc {
    // Initializes the per-node BFS levels and shortest-path counts (sigmas):
    // the source gets level 0 / sigma 1, every other node INF / 0.
    // One thread per node.
    __global__ void BCInit(level_t *levels, sigma_t *sigmas, int nnodes, index_t source) {
        int tid = GTID;
        if (tid < nnodes) {
            if (tid == source) {
                levels[tid] = 0;
                sigmas[tid] = 1;
            } else {
                levels[tid] = INF;
                sigmas[tid] = 0;
            }
        }
    }

    // Stage 1 (fused, thread-per-item variant): level-synchronous BFS that
    // also accumulates sigma counts and records each frontier into wl_stack
    // for the later dependency-accumulation stage.  The whole traversal runs
    // in a single kernel launch; grid_barrier.Sync() separates BFS levels,
    // so this must be launched with a cooperative-capable grid size
    // (occupancy-derived, see Problem::Relax).  wl1/wl2 are ping-ponged as
    // input/output frontiers; *p_search_depth tracks the deepest level seen.
    template<typename TGraph, typename TGraphDatum, typename TWorklist, typename TWLStack>
    __global__ void BFSKernelFused(TGraph graph, TGraphDatum levels_datum, sigma_t *p_node_sigmas, index_t *p_search_depth,
                                   TWorklist wl1, TWorklist wl2, TWLStack wl_stack, cub::GridBarrier grid_barrier) {
        int tid = TID_1D;
        unsigned nthreads = TOTAL_THREADS_1D;
        uint32_t work_size;
        TWorklist *wl_in = &wl1;
        TWorklist *wl_out = &wl2;
        while ((work_size = wl_in->len()) > 0) {
            for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
                index_t node = wl_in->read(i);
                level_t next_level = levels_datum.get_item(node) + 1;
                wl_stack.append(node);  // remember this frontier for stage 2
                for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) {
                    index_t dest = graph.edge_dest(edge);
                    // Claim the level; prev tells us whether dest was new,
                    // already on this level, or on an earlier level.
                    level_t prev = atomicMin(levels_datum.get_item_ptr(dest), next_level);
                    if (prev == INF) {
                        // First discovery: inherit the path count.
                        atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]);
                        atomicMax(p_search_depth, next_level);
                    } else {
                        if (levels_datum[dest] == next_level) {
                            // Another shortest path into dest via this node.
                            atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]);
                        }
                    }
                    if (next_level < prev) {
                        wl_out->append(dest);
                    }
                }
            }
            grid_barrier.Sync();
            if (tid == 0) {
                wl_in->reset();
                wl_stack.push();  // seal the just-processed frontier as a stack level
            }
            grid_barrier.Sync();
            // Swap input/output frontiers for the next BFS level.
            auto *tmp = wl_in;
            wl_in = wl_out;
            wl_out = tmp;
        }
    }

    // Stage 1 (fused, CTA/np-local variant): same algorithm as BFSKernelFused
    // but edge expansion is load-balanced across the block via
    // CTAWorkScheduler; np_local carries each node's edge range and the node
    // id as meta_data into the per-edge lambda.
    template<typename TGraph, typename TGraphDatum, typename TWorklist, typename TWLStack>
    __global__ void BFSKernelCTAFused(TGraph graph, TGraphDatum levels_datum, sigma_t *p_node_sigmas, index_t *p_search_depth,
                                      TWorklist wl1, TWorklist wl2, TWLStack wl_stack, cub::GridBarrier grid_barrier) {
        int tid = TID_1D;
        unsigned nthreads = TOTAL_THREADS_1D;
        uint32_t work_size;
        TWorklist *wl_in = &wl1;
        TWorklist *wl_out = &wl2;
        while ((work_size = wl_in->len()) > 0) {
            // Round the frontier size up so every thread enters the CTA
            // scheduler (threads past the end carry an empty np_local).
            uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
            for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
                groute::dev::np_local<index_t> np_local = {0, 0, 0};
                if (i < work_size) {
                    index_t node = wl_in->read(i);
                    wl_stack.append(node);
                    np_local.start = graph.begin_edge(node);
                    np_local.size = graph.end_edge(node) - np_local.start;
                    np_local.meta_data = node;
                }
                groute::dev::CTAWorkScheduler<level_t>::schedule(
                        np_local,
                        [&graph, &levels_datum, &p_node_sigmas, &p_search_depth, &wl_out](index_t edge, index_t node) {
                            level_t next_level = levels_datum.get_item(node) + 1;
                            index_t dest = graph.edge_dest(edge);
                            level_t prev = atomicMin(levels_datum.get_item_ptr(dest), next_level);
                            if (prev == INF) {
                                atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]);
                                atomicMax(p_search_depth, next_level);
                            } else {
                                if (levels_datum[dest] == next_level) {
                                    atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]);
                                }
                            }
                            if (next_level < prev) {
                                wl_out->append(dest);
                            }
                        });
            }
            grid_barrier.Sync();
            if (tid == 0) {
                wl_in->reset();
                wl_stack.push();
            }
            grid_barrier.Sync();
            auto *tmp = wl_in;
            wl_in = wl_out;
            wl_out = tmp;
        }
    }

    // Stage 2 (fused, thread-per-item): dependency accumulation.  Walks the
    // recorded frontiers from the deepest level back to 1; for each node it
    // adds the Brandes-style delta contribution from every successor one
    // level deeper.  barrier.Sync() keeps all threads on the same depth.
    template<typename Graph, typename WLStack, typename SourcePath, typename Sigmas>
    __global__ void StageTwoDDFused(Graph graph, WLStack wl_stack, SourcePath node_source_path, Sigmas *p_node_sigmas,
                                    Sigmas *p_node_bc_values, uint32_t *p_search_depth, cub::GridBarrier barrier) {
        uint32_t tid = TID_1D;
        uint32_t nthreads = TOTAL_THREADS_1D;
        uint32_t curr_depth = *p_search_depth;
        while (curr_depth > 0) {
            uint32_t begin_pos = wl_stack.begin_pos(curr_depth);
            uint32_t end_pos = wl_stack.end_pos(curr_depth);
            for (uint32_t idx = tid + begin_pos; idx < end_pos; idx += nthreads) {
                index_t node = wl_stack.read(idx);
                index_t src_depth = node_source_path[node];
                for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; edge++) {
                    index_t dest = graph.edge_dest(edge);
                    if (node_source_path[dest] == src_depth + 1) {
                        // delta(v) += sigma(v)/sigma(w) * (1 + delta(w))
                        float delta_to = 1.0f * p_node_sigmas[node] / p_node_sigmas[dest] * (1.0f + p_node_bc_values[dest]);
                        atomicAdd(p_node_bc_values + node, delta_to);
                    }
                }
            }
            barrier.Sync();
            curr_depth--;
        }
    }

    // Stage 2 (fused, CTA/np-local variant): same dependency accumulation
    // with CTA-level edge load balancing, mirroring BFSKernelCTAFused.
    template<typename Graph, typename WLStack, typename SourcePath, typename Sigmas>
    __global__ void StageTwoDDCTAFused(Graph graph, WLStack wl_stack, SourcePath node_source_path, Sigmas *p_node_sigmas,
                                       Sigmas *p_node_bc_values, uint32_t *p_search_depth, cub::GridBarrier barrier) {
        uint32_t tid = TID_1D;
        uint32_t nthreads = TOTAL_THREADS_1D;
        uint32_t curr_depth = *p_search_depth;
        while (curr_depth > 0) {
            uint32_t begin_pos = wl_stack.begin_pos(curr_depth);
            uint32_t end_pos = wl_stack.end_pos(curr_depth);
            uint32_t work_size = end_pos - begin_pos;
            uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
            for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
                groute::dev::np_local<index_t> np_local = {0, 0};
                if (i < work_size) {
                    index_t node = wl_stack.read(begin_pos + i);
                    np_local.start = graph.begin_edge(node);
                    np_local.size = graph.end_edge(node) - np_local.start;
                    np_local.meta_data = node;
                }
                groute::dev::CTAWorkScheduler<index_t>::schedule(
                        np_local,
                        [&graph, &node_source_path, &p_node_bc_values, &p_node_sigmas](index_t edge, index_t node) {
                            index_t src_depth = node_source_path[node];
                            index_t dest = graph.edge_dest(edge);
                            if (node_source_path[dest] == src_depth + 1) {
                                float delta_to = 1.0f * p_node_sigmas[node] / p_node_sigmas[dest] * (1.0f + p_node_bc_values[dest]);
                                atomicAdd(p_node_bc_values + node, delta_to);
                            }
                            return true;
                        });
            }
            barrier.Sync();
            curr_depth--;
        }
    }

    // Host-side driver that owns the device graph/datum handles and runs the
    // two fused stages.  The grid size for each fused kernel is derived from
    // occupancy so that cub::GridBarrier is safe to use.
    template<typename TGraph, typename TGraphDatum>
    class Problem {
    private:
        TGraph m_graph;
        TGraphDatum m_levels_datum;        // per-node BFS level (source path)
        sigma_t *m_p_sigmas_datum;         // per-node shortest-path counts
        centrality_t *m_p_bc_value_datum;  // per-node BC accumulators
        uint32_t *m_search_depth;          // deepest BFS level reached

    public:
        Problem(const TGraph &graph, const TGraphDatum &levels_datum, sigma_t *p_sigmas_datum,
                centrality_t *p_bc_value_datum, uint32_t *search_depth) :
                m_graph(graph), m_levels_datum(levels_datum), m_p_sigmas_datum(p_sigmas_datum),
                m_p_bc_value_datum(p_bc_value_datum), m_search_depth(search_depth) {
        }

        // Seeds levels/sigmas on the device and pushes the source node onto
        // the initial frontier (both asynchronously on `stream`).
        void Init(index_t source_node, groute::Worklist<index_t> &in_wl, groute::Stream &stream) const {
            dim3 grid_dims, block_dims;
            KernelSizing(grid_dims, block_dims, m_levels_datum.size);
            BCInit << < grid_dims, block_dims, 0, stream.cuda_stream >> >
                    (m_levels_datum.data_ptr, m_p_sigmas_datum, m_graph.nnodes, source_node);
            in_wl.AppendItemAsync(stream.cuda_stream, source_node);
        }

        // Runs stage 1 (fused BFS + sigma) and stage 2 (dependency
        // accumulation), picking the CTA-balanced kernels when FLAGS_cta_np
        // is set, and prints per-stage timings.
        template<typename TWorklist, typename TWLStack>
        void Relax(TWorklist &wl1, TWorklist &wl2, TWLStack &wl_stack, groute::Stream &stream) {
            dim3 grid_dims, block_dims;
            int occupancy_per_MP;
            hipDeviceProp_t dev_props;
            cub::GridBarrierLifetime barrier;
            GROUTE_CUDA_CHECK(hipGetDeviceProperties(&dev_props, 0));
            Stopwatch sw_stage1(true);
            if (FLAGS_cta_np) {
                hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP,
                                                             BFSKernelCTAFused<groute::graphs::dev::CSRGraph,
                                                                     groute::graphs::dev::GraphDatum<level_t>,
                                                                     groute::dev::Worklist<index_t>,
                                                                     groute::dev::WorklistStack<index_t >>,
                                                             FLAGS_block_size, 0);
                int fused_work_blocks = dev_props.multiProcessorCount * occupancy_per_MP;
                grid_dims.x = fused_work_blocks;
                block_dims.x = FLAGS_block_size;
                // NOTE(review): this declaration shadows the outer `barrier`;
                // the inner one is what the kernel below actually receives.
                cub::GridBarrierLifetime barrier;
                barrier.Setup(grid_dims.x);
                // NOTE(review): shadows the outer sw_stage1; the timing printed
                // after the if/else comes from the OUTER stopwatch, which also
                // includes the occupancy query and barrier setup above.
                Stopwatch sw_stage1(true);
                BFSKernelCTAFused << < grid_dims, block_dims, 0, stream.cuda_stream >> >
                        (m_graph, m_levels_datum, m_p_sigmas_datum, m_search_depth,
                         wl1.DeviceObject(), wl2.DeviceObject(), wl_stack.DeviceObject(), barrier);
                stream.Sync();
            } else {
                hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP,
                                                             BFSKernelFused<groute::graphs::dev::CSRGraph,
                                                                     groute::graphs::dev::GraphDatum<level_t>,
                                                                     groute::dev::Worklist<index_t>,
                                                                     groute::dev::WorklistStack<index_t >>,
                                                             FLAGS_block_size, 0);
                int fused_work_blocks = dev_props.multiProcessorCount * occupancy_per_MP;
                grid_dims.x = fused_work_blocks;
                block_dims.x = FLAGS_block_size;
                barrier.Setup(grid_dims.x);
                BFSKernelFused << < grid_dims, block_dims, 0, stream.cuda_stream >> >
                        (m_graph, m_levels_datum, m_p_sigmas_datum, m_search_depth,
                         wl1.DeviceObject(), wl2.DeviceObject(), wl_stack.DeviceObject(), barrier);
                stream.Sync();
            }
            sw_stage1.stop();
            printf("Time stage1: %f\n", sw_stage1.ms());
            Stopwatch sw_stage2(true);
            if (FLAGS_cta_np) {
                hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP,
                                                             StageTwoDDCTAFused<groute::graphs::dev::CSRGraph,
                                                                     groute::dev::WorklistStack<index_t>,
                                                                     groute::graphs::dev::GraphDatum<level_t>,
                                                                     sigma_t>,
                                                             FLAGS_block_size, 0);
                grid_dims.x = dev_props.multiProcessorCount * occupancy_per_MP;
                barrier.Setup(grid_dims.x);
                StageTwoDDCTAFused << < grid_dims, block_dims, 0, stream.cuda_stream >> >
                        (m_graph, wl_stack.DeviceObject(), m_levels_datum, m_p_sigmas_datum,
                         m_p_bc_value_datum, m_search_depth, barrier);
                stream.Sync();
            } else {
                hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP,
                                                             StageTwoDDFused<groute::graphs::dev::CSRGraph,
                                                                     groute::dev::WorklistStack<index_t>,
                                                                     groute::graphs::dev::GraphDatum<level_t>,
                                                                     sigma_t>,
                                                             FLAGS_block_size, 0);
                grid_dims.x = dev_props.multiProcessorCount * occupancy_per_MP;
                barrier.Setup(grid_dims.x);
                StageTwoDDFused << < grid_dims, block_dims, 0, stream.cuda_stream >> >
                        (m_graph, wl_stack.DeviceObject(), m_levels_datum, m_p_sigmas_datum,
                         m_p_bc_value_datum, m_search_depth, barrier);
                stream.Sync();
            }
            sw_stage2.stop();
            printf("Time stage2: %f\n", sw_stage2.ms());
        }
    };

    // Naming hooks used by the groute traversal framework.
    struct Algo {
        static const char *NameLower() { return "bc"; }

        static const char *Name() { return "BC"; }
    };
}

// Single-GPU BC test driver: loads the graph, allocates device buffers,
// runs Init + Relax from FLAGS_source_node, halves the accumulated BC
// values (each undirected contribution is counted twice), optionally dumps
// the result, and optionally verifies against the host reference.
bool TestBCSingle() {
    groute::graphs::single::NodeOutputDatum<level_t> levels_datum;

    groute::graphs::traversal::Context<bc::Algo> context(1);
    groute::graphs::single::CSRGraphAllocator dev_graph_allocator(context.host_graph);
    context.SetDevice(0);
    dev_graph_allocator.AllocateDatumObjects(levels_datum);
    context.SyncDevice(0); // graph allocations are on default streams, must sync device

    index_t nnodes = context.nvtxs;

    utils::SharedArray<sigma_t> dev_node_sigmas(nnodes);
    utils::SharedArray<float> dev_node_bc_values(nnodes);
    utils::SharedValue<uint32_t> dev_search_depth;

    bc::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum<level_t>>
            problem(dev_graph_allocator.DeviceObject(), levels_datum.DeviceObject(),
                    dev_node_sigmas.dev_ptr, dev_node_bc_values.dev_ptr, dev_search_depth.dev_ptr);

    // Worklist capacity is a factor of the edge count (or an absolute
    // override); the stack holds every frontier, hence the 2x sizing.
    size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor;
    if (FLAGS_wl_alloc_abs > 0)
        max_work_size = FLAGS_wl_alloc_abs;

    groute::Stream stream;

    groute::Worklist<index_t> wl1(max_work_size), wl2(max_work_size);
    groute::WorklistStack<index_t> wl_stack(max_work_size * 2);

    wl1.ResetAsync(stream.cuda_stream);
    wl2.ResetAsync(stream.cuda_stream);
    wl_stack.ResetAsync(stream);
    stream.Sync();

    // Clamp the requested source into the valid node range.
    index_t source_node = min(max(0, FLAGS_source_node), context.nvtxs - 1);

    Stopwatch sw(true);

    problem.Init(source_node, wl1, stream);
    problem.Relax(wl1, wl2, wl_stack, stream);

    stream.Sync();
    sw.stop();

    printf("\n%s: %f ms. <filter>\n\n", bc::Algo::Name(), sw.ms());

    dev_node_sigmas.D2H();
    dev_node_bc_values.D2H();

    // Halve BC values (each dependency is accumulated from both endpoints).
    for (int i = 0; i < dev_node_bc_values.host_vec.size(); i++) {
        dev_node_bc_values.host_vec[i] /= 2;
    }

//    for (int i = 0; i < 100; i++)
//    {
//        printf("node: %d %f %f\n", i, dev_node_sigmas.host_vec[i], dev_node_bc_values.host_vec[i]);
//    }

    // Gather
    if (FLAGS_output.length() != 0)
        BCOutput(FLAGS_output.c_str(), dev_node_bc_values.host_vec);

    if (FLAGS_check) {
        auto result_pair = BetweennessCentralityHost(context.host_graph, source_node);
        int failed_sigmas = BCCheckErrors(result_pair.second, dev_node_sigmas.host_vec);
        if (failed_sigmas) {
            // NOTE(review): "Sigams" is a typo in the runtime string; left
            // untouched here since this edit changes comments only.
            printf("Sigams failed!\n");
        }
        int failed_bc = BCCheckErrors(result_pair.first, dev_node_bc_values.host_vec);

        if (failed_bc) {
            printf("BC value failed!\n");
        }
        return failed_sigmas + failed_bc == 0;
    } else {
        printf("Warning: Result not checked\n");
        return true;
    }
}
7de1d918ef4f4c465786b389467603a52887fa82.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
#include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/event_pool.h> #include <groute/distributed_worklist.h> #include <groute/worklist_stack.h> #include <utils/parser.h> #include <utils/utils.h> #include <utils/stopwatch.h> #include <utils/markers.h> #include <groute/graphs/csr_graph.h> #include <groute/graphs/traversal_algo.h> #include <groute/cta_work.h> #include <utils/cuda_utils.h> #include "bc_common.h" DEFINE_int32(source_node, 0, "The source node for the BC traversal (clamped to [0, nnodes-1])"); const level_t INF = UINT_MAX; #define GTID (blockIdx.x * blockDim.x + threadIdx.x) namespace bc { __global__ void BCInit(level_t *levels, sigma_t *sigmas, int nnodes, index_t source) { int tid = GTID; if (tid < nnodes) { if (tid == source) { levels[tid] = 0; sigmas[tid] = 1; } else { levels[tid] = INF; sigmas[tid] = 0; } } } template<typename TGraph, typename TGraphDatum, typename TWorklist, typename TWLStack> __global__ void BFSKernelFused(TGraph graph, TGraphDatum levels_datum, sigma_t *p_node_sigmas, index_t *p_search_depth, TWorklist wl1, TWorklist wl2, TWLStack wl_stack, cub::GridBarrier grid_barrier) { int tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; uint32_t work_size; TWorklist *wl_in = &wl1; TWorklist *wl_out = &wl2; while ((work_size = wl_in->len()) > 0) { for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = wl_in->read(i); level_t next_level = levels_datum.get_item(node) + 1; wl_stack.append(node); for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); level_t prev = atomicMin(levels_datum.get_item_ptr(dest), next_level); if (prev == INF) { atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]); atomicMax(p_search_depth, next_level); } else { if (levels_datum[dest] == next_level) { atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]); } } if (next_level < prev) { 
wl_out->append(dest); } } } grid_barrier.Sync(); if (tid == 0) { wl_in->reset(); wl_stack.push(); } grid_barrier.Sync(); auto *tmp = wl_in; wl_in = wl_out; wl_out = tmp; } } template<typename TGraph, typename TGraphDatum, typename TWorklist, typename TWLStack> __global__ void BFSKernelCTAFused(TGraph graph, TGraphDatum levels_datum, sigma_t *p_node_sigmas, index_t *p_search_depth, TWorklist wl1, TWorklist wl2, TWLStack wl_stack, cub::GridBarrier grid_barrier) { int tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; uint32_t work_size; TWorklist *wl_in = &wl1; TWorklist *wl_out = &wl2; while ((work_size = wl_in->len()) > 0) { uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<index_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = wl_in->read(i); wl_stack.append(node); np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = node; } groute::dev::CTAWorkScheduler<level_t>::schedule (np_local, [&graph, &levels_datum, &p_node_sigmas, &p_search_depth, &wl_out](index_t edge, index_t node) { level_t next_level = levels_datum.get_item(node) + 1; index_t dest = graph.edge_dest(edge); level_t prev = atomicMin(levels_datum.get_item_ptr(dest), next_level); if (prev == INF) { atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]); atomicMax(p_search_depth, next_level); } else { if (levels_datum[dest] == next_level) { atomicAdd(p_node_sigmas + dest, p_node_sigmas[node]); } } if (next_level < prev) { wl_out->append(dest); } }); } grid_barrier.Sync(); if (tid == 0) { wl_in->reset(); wl_stack.push(); } grid_barrier.Sync(); auto *tmp = wl_in; wl_in = wl_out; wl_out = tmp; } } template<typename Graph, typename WLStack, typename SourcePath, typename Sigmas> __global__ void StageTwoDDFused(Graph graph, WLStack wl_stack, SourcePath node_source_path, Sigmas *p_node_sigmas, Sigmas *p_node_bc_values, uint32_t 
*p_search_depth, cub::GridBarrier barrier) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t curr_depth = *p_search_depth; while (curr_depth > 0) { uint32_t begin_pos = wl_stack.begin_pos(curr_depth); uint32_t end_pos = wl_stack.end_pos(curr_depth); for (uint32_t idx = tid + begin_pos; idx < end_pos; idx += nthreads) { index_t node = wl_stack.read(idx); index_t src_depth = node_source_path[node]; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; edge++) { index_t dest = graph.edge_dest(edge); if (node_source_path[dest] == src_depth + 1) { float delta_to = 1.0f * p_node_sigmas[node] / p_node_sigmas[dest] * (1.0f + p_node_bc_values[dest]); atomicAdd(p_node_bc_values + node, delta_to); } } } barrier.Sync(); curr_depth--; } } template<typename Graph, typename WLStack, typename SourcePath, typename Sigmas> __global__ void StageTwoDDCTAFused(Graph graph, WLStack wl_stack, SourcePath node_source_path, Sigmas *p_node_sigmas, Sigmas *p_node_bc_values, uint32_t *p_search_depth, cub::GridBarrier barrier) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t curr_depth = *p_search_depth; while (curr_depth > 0) { uint32_t begin_pos = wl_stack.begin_pos(curr_depth); uint32_t end_pos = wl_stack.end_pos(curr_depth); uint32_t work_size = end_pos - begin_pos; uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<index_t> np_local = {0, 0}; if (i < work_size) { index_t node = wl_stack.read(begin_pos + i); np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = node; } groute::dev::CTAWorkScheduler<index_t>::schedule (np_local, [&graph, &node_source_path, &p_node_bc_values, &p_node_sigmas](index_t edge, index_t node) { index_t src_depth = node_source_path[node]; index_t dest = graph.edge_dest(edge); if (node_source_path[dest] == src_depth + 
1) { float delta_to = 1.0f * p_node_sigmas[node] / p_node_sigmas[dest] * (1.0f + p_node_bc_values[dest]); atomicAdd(p_node_bc_values + node, delta_to); } return true; }); } barrier.Sync(); curr_depth--; } } template<typename TGraph, typename TGraphDatum> class Problem { private: TGraph m_graph; TGraphDatum m_levels_datum; sigma_t *m_p_sigmas_datum; centrality_t *m_p_bc_value_datum; uint32_t *m_search_depth; public: Problem(const TGraph &graph, const TGraphDatum &levels_datum, sigma_t *p_sigmas_datum, centrality_t *p_bc_value_datum, uint32_t *search_depth) : m_graph(graph), m_levels_datum(levels_datum), m_p_sigmas_datum(p_sigmas_datum), m_p_bc_value_datum(p_bc_value_datum), m_search_depth(search_depth) { } void Init(index_t source_node, groute::Worklist<index_t> &in_wl, groute::Stream &stream) const { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, m_levels_datum.size); BCInit << < grid_dims, block_dims, 0, stream.cuda_stream >> > (m_levels_datum.data_ptr, m_p_sigmas_datum, m_graph.nnodes, source_node); in_wl.AppendItemAsync(stream.cuda_stream, source_node); } template<typename TWorklist, typename TWLStack> void Relax(TWorklist &wl1, TWorklist &wl2, TWLStack &wl_stack, groute::Stream &stream) { dim3 grid_dims, block_dims; int occupancy_per_MP; cudaDeviceProp dev_props; cub::GridBarrierLifetime barrier; GROUTE_CUDA_CHECK(cudaGetDeviceProperties(&dev_props, 0)); Stopwatch sw_stage1(true); if (FLAGS_cta_np) { cudaOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, BFSKernelCTAFused<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum<level_t>, groute::dev::Worklist<index_t>, groute::dev::WorklistStack<index_t >>, FLAGS_block_size, 0); int fused_work_blocks = dev_props.multiProcessorCount * occupancy_per_MP; grid_dims.x = fused_work_blocks; block_dims.x = FLAGS_block_size; cub::GridBarrierLifetime barrier; barrier.Setup(grid_dims.x); Stopwatch sw_stage1(true); BFSKernelCTAFused << < grid_dims, block_dims, 0, stream.cuda_stream >> > 
(m_graph, m_levels_datum, m_p_sigmas_datum, m_search_depth, wl1.DeviceObject(), wl2.DeviceObject(), wl_stack.DeviceObject(), barrier); stream.Sync(); } else { cudaOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, BFSKernelFused<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum<level_t>, groute::dev::Worklist<index_t>, groute::dev::WorklistStack<index_t >>, FLAGS_block_size, 0); int fused_work_blocks = dev_props.multiProcessorCount * occupancy_per_MP; grid_dims.x = fused_work_blocks; block_dims.x = FLAGS_block_size; barrier.Setup(grid_dims.x); BFSKernelFused << < grid_dims, block_dims, 0, stream.cuda_stream >> > (m_graph, m_levels_datum, m_p_sigmas_datum, m_search_depth, wl1.DeviceObject(), wl2.DeviceObject(), wl_stack.DeviceObject(), barrier); stream.Sync(); } sw_stage1.stop(); printf("Time stage1: %f\n", sw_stage1.ms()); Stopwatch sw_stage2(true); if (FLAGS_cta_np) { cudaOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, StageTwoDDCTAFused<groute::graphs::dev::CSRGraph, groute::dev::WorklistStack<index_t>, groute::graphs::dev::GraphDatum<level_t>, sigma_t>, FLAGS_block_size, 0); grid_dims.x = dev_props.multiProcessorCount * occupancy_per_MP; barrier.Setup(grid_dims.x); StageTwoDDCTAFused << < grid_dims, block_dims, 0, stream.cuda_stream >> > (m_graph, wl_stack.DeviceObject(), m_levels_datum, m_p_sigmas_datum, m_p_bc_value_datum, m_search_depth, barrier); stream.Sync(); } else { cudaOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, StageTwoDDFused<groute::graphs::dev::CSRGraph, groute::dev::WorklistStack<index_t>, groute::graphs::dev::GraphDatum<level_t>, sigma_t>, FLAGS_block_size, 0); grid_dims.x = dev_props.multiProcessorCount * occupancy_per_MP; barrier.Setup(grid_dims.x); StageTwoDDFused << < grid_dims, block_dims, 0, stream.cuda_stream >> > (m_graph, wl_stack.DeviceObject(), m_levels_datum, m_p_sigmas_datum, m_p_bc_value_datum, m_search_depth, barrier); stream.Sync(); } sw_stage2.stop(); printf("Time stage2: %f\n", 
sw_stage2.ms()); } }; struct Algo { static const char *NameLower() { return "bc"; } static const char *Name() { return "BC"; } }; } bool TestBCSingle() { groute::graphs::single::NodeOutputDatum<level_t> levels_datum; groute::graphs::traversal::Context<bc::Algo> context(1); groute::graphs::single::CSRGraphAllocator dev_graph_allocator(context.host_graph); context.SetDevice(0); dev_graph_allocator.AllocateDatumObjects(levels_datum); context.SyncDevice(0); // graph allocations are on default streams, must sync device index_t nnodes = context.nvtxs; utils::SharedArray<sigma_t> dev_node_sigmas(nnodes); utils::SharedArray<float> dev_node_bc_values(nnodes); utils::SharedValue<uint32_t> dev_search_depth; bc::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum<level_t>> problem(dev_graph_allocator.DeviceObject(), levels_datum.DeviceObject(), dev_node_sigmas.dev_ptr, dev_node_bc_values.dev_ptr, dev_search_depth.dev_ptr); size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor; if (FLAGS_wl_alloc_abs > 0) max_work_size = FLAGS_wl_alloc_abs; groute::Stream stream; groute::Worklist<index_t> wl1(max_work_size), wl2(max_work_size); groute::WorklistStack<index_t> wl_stack(max_work_size * 2); wl1.ResetAsync(stream.cuda_stream); wl2.ResetAsync(stream.cuda_stream); wl_stack.ResetAsync(stream); stream.Sync(); index_t source_node = min(max(0, FLAGS_source_node), context.nvtxs - 1); Stopwatch sw(true); problem.Init(source_node, wl1, stream); problem.Relax(wl1, wl2, wl_stack, stream); stream.Sync(); sw.stop(); printf("\n%s: %f ms. 
<filter>\n\n", bc::Algo::Name(), sw.ms()); dev_node_sigmas.D2H(); dev_node_bc_values.D2H(); for (int i = 0; i < dev_node_bc_values.host_vec.size(); i++) { dev_node_bc_values.host_vec[i] /= 2; } // for (int i = 0; i < 100; i++) // { // printf("node: %d %f %f\n", i, dev_node_sigmas.host_vec[i], dev_node_bc_values.host_vec[i]); // } // Gather if (FLAGS_output.length() != 0) BCOutput(FLAGS_output.c_str(), dev_node_bc_values.host_vec); if (FLAGS_check) { auto result_pair = BetweennessCentralityHost(context.host_graph, source_node); int failed_sigmas = BCCheckErrors(result_pair.second, dev_node_sigmas.host_vec); if (failed_sigmas) { printf("Sigams failed!\n"); } int failed_bc = BCCheckErrors(result_pair.first, dev_node_bc_values.host_vec); if (failed_bc) { printf("BC value failed!\n"); } return failed_sigmas + failed_bc == 0; } else { printf("Warning: Result not checked\n"); return true; } }
1652c6ef26fd8df55973bf924f44af5b62d7d4c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __Salsa20_KERNEL_CU__ #define __Salsa20_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define ROTATE(v,c) (rotl32(v,c)) #define XOR(v,w) ((v) ^ (w)) #define PLUS(v,w) ((u32)((v) + (w))) #define PLUSONE(v) (PLUS((v),1)) #define SIGMA_0 0x61707865 #define SIGMA_1 0x3320646e #define SIGMA_2 0x79622d32 #define SIGMA_3 0x6b206574 #define TAU_0 0x61707865 #define TAU_1 0x3120646e #define TAU_2 0x79622d36 #define TAU_3 0x6b206574 __global__ void Salsa20_keyivsetup(u32* g_x, u32 *keys, u32 key_size, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 x0, x1, x2, x3, x4, x5, x10, x11, x12, x13, x14, x15; x1 = __mem(keys,tID,0,nr_streams); x2 = __mem(keys,tID,1,nr_streams); x3 = __mem(keys,tID,2,nr_streams); x4 = __mem(keys,tID,3,nr_streams); if(key_size==256) { x11 = __mem(keys,tID,4,nr_streams); x12 = __mem(keys,tID,5,nr_streams); x13 = __mem(keys,tID,6,nr_streams); x14 = __mem(keys,tID,7,nr_streams); x0 = SIGMA_0; x5 = SIGMA_1; x10 = SIGMA_2; x15 = SIGMA_3; } else { x11 = x1; x12 = x2; x13 = x3; x14 = x4; x0 = TAU_0; x5 = TAU_1; x10 = TAU_2; x15 = TAU_3; } __mem(g_x,tID, 0,nr_streams) = x0; __mem(g_x,tID, 1,nr_streams) = x1; __mem(g_x,tID, 2,nr_streams) = x2; __mem(g_x,tID, 3,nr_streams) = x3; __mem(g_x,tID, 4,nr_streams) = x4; __mem(g_x,tID, 5,nr_streams) = x5; if(iv_size>0) { __mem(g_x,tID, 6,nr_streams) = __mem(ivs,tID,0,nr_streams); __mem(g_x,tID, 7,nr_streams) = __mem(ivs,tID,1,nr_streams); } __mem(g_x,tID, 8,nr_streams) = 0; __mem(g_x,tID, 9,nr_streams) = 0; __mem(g_x,tID,10,nr_streams) = x10; __mem(g_x,tID,11,nr_streams) = x11; __mem(g_x,tID,12,nr_streams) = x12; __mem(g_x,tID,13,nr_streams) = x13; __mem(g_x,tID,14,nr_streams) = x14; __mem(g_x,tID,15,nr_streams) = x15; 
} #define print_all\ printf("%d:[0x%08x],[0x%08x]\n",tID,x0 ,input( 0));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x1 ,input( 1));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x2 ,input( 2));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x3 ,input( 3));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x4 ,input( 4));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x5 ,input( 5));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x6 ,input( 6));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x7 ,input( 7));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x8 ,input( 8));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x9 ,input( 9));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x10,input(10));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x11,input(11));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x12,input(12));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x13,input(13));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x14,input(14));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x15,input(15));\ printf("\n");\ #define SALSA20(x)\ do {\ int i;\ x0 = input( 0);\ x1 = input( 1);\ x2 = input( 2);\ x3 = input( 3);\ x4 = input( 4);\ x5 = input( 5);\ x6 = input( 6);\ x7 = input( 7);\ x8 = input( 8);\ x9 = input( 9);\ x10 = input(10);\ x11 = input(11);\ x12 = input(12);\ x13 = input(13);\ x14 = input(14);\ x15 = input(15);\ for (i = 20;i > 0;i -= 2) {\ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));\ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));\ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));\ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));\ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));\ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));\ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));\ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));\ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));\ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));\ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));\ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));\ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));\ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));\ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));\ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));\ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));\ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));\ x3 = XOR( x3,ROTATE(PLUS( x2, 
x1),13));\ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));\ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));\ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));\ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));\ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));\ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));\ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));\ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));\ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));\ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));\ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));\ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));\ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));\ }\ x0 = PLUS( x0,input( 0));\ x1 = PLUS( x1,input( 1));\ x2 = PLUS( x2,input( 2));\ x3 = PLUS( x3,input( 3));\ x4 = PLUS( x4,input( 4));\ x5 = PLUS( x5,input( 5));\ x6 = PLUS( x6,input( 6));\ x7 = PLUS( x7,input( 7));\ x8 = PLUS( x8,input( 8));\ x9 = PLUS( x9,input( 9));\ x10 = PLUS(x10,input(10));\ x11 = PLUS(x11,input(11));\ x12 = PLUS(x12,input(12));\ x13 = PLUS(x13,input(13));\ x14 = PLUS(x14,input(14));\ x15 = PLUS(x15,input(15));\ if(!(input( 8) = PLUSONE(input( 8)))) {\ input( 9) = PLUSONE(input( 9));\ }\ } while(0) extern __shared__ __align__ (__alignof(void*)) u32 smem_cache[]; __global__ void Salsa20_process_blocks(gSTREAM_action act, u32* g_x, u32 *buff, u32 nr_blocks) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; #ifdef INPUT_SHMEM u32* input_csh=(u32*) smem_cache; #define input(idx) __mem(input_csh,threadIdx.x,(idx),blockDim.x) #else u32 i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15; #define input(idx) i##idx #endif /* load state */ input( 0) = __mem(g_x,tID, 0,nr_streams); input( 1) = __mem(g_x,tID, 1,nr_streams); input( 2) = __mem(g_x,tID, 2,nr_streams); input( 3) = __mem(g_x,tID, 3,nr_streams); input( 4) = __mem(g_x,tID, 4,nr_streams); input( 5) = __mem(g_x,tID, 5,nr_streams); input( 6) = __mem(g_x,tID, 6,nr_streams); input( 7) = __mem(g_x,tID, 7,nr_streams); input( 8) = 
__mem(g_x,tID, 8,nr_streams); input( 9) = __mem(g_x,tID, 9,nr_streams); input(10) = __mem(g_x,tID,10,nr_streams); input(11) = __mem(g_x,tID,11,nr_streams); input(12) = __mem(g_x,tID,12,nr_streams); input(13) = __mem(g_x,tID,13,nr_streams); input(14) = __mem(g_x,tID,14,nr_streams); input(15) = __mem(g_x,tID,15,nr_streams); for(int block_no=0;block_no<nr_blocks;block_no++) { /* output of Salsa20 is x0 - x 15 */ SALSA20(x); /* copy/xor-into global buffer */ if(act!=GEN_KEYSTREAM) { __mem(buff,tID, 0,nr_streams) ^= x0; __mem(buff,tID, 1,nr_streams) ^= x1; __mem(buff,tID, 2,nr_streams) ^= x2; __mem(buff,tID, 3,nr_streams) ^= x3; __mem(buff,tID, 4,nr_streams) ^= x4; __mem(buff,tID, 5,nr_streams) ^= x5; __mem(buff,tID, 6,nr_streams) ^= x6; __mem(buff,tID, 7,nr_streams) ^= x7; __mem(buff,tID, 8,nr_streams) ^= x8; __mem(buff,tID, 9,nr_streams) ^= x9; __mem(buff,tID,10,nr_streams) ^= x10; __mem(buff,tID,11,nr_streams) ^= x11; __mem(buff,tID,12,nr_streams) ^= x12; __mem(buff,tID,13,nr_streams) ^= x13; __mem(buff,tID,14,nr_streams) ^= x14; __mem(buff,tID,15,nr_streams) ^= x15; } else { __mem(buff,tID, 0,nr_streams) = x0; __mem(buff,tID, 1,nr_streams) = x1; __mem(buff,tID, 2,nr_streams) = x2; __mem(buff,tID, 3,nr_streams) = x3; __mem(buff,tID, 4,nr_streams) = x4; __mem(buff,tID, 5,nr_streams) = x5; __mem(buff,tID, 6,nr_streams) = x6; __mem(buff,tID, 7,nr_streams) = x7; __mem(buff,tID, 8,nr_streams) = x8; __mem(buff,tID, 9,nr_streams) = x9; __mem(buff,tID,10,nr_streams) = x10; __mem(buff,tID,11,nr_streams) = x11; __mem(buff,tID,12,nr_streams) = x12; __mem(buff,tID,13,nr_streams) = x13; __mem(buff,tID,14,nr_streams) = x14; __mem(buff,tID,15,nr_streams) = x15; } buff+=16*nr_streams; } /* save state */ __mem(g_x,tID, 8,nr_streams) = input( 8); __mem(g_x,tID, 9,nr_streams) = input( 9); } #endif
1652c6ef26fd8df55973bf924f44af5b62d7d4c3.cu
#ifndef __Salsa20_KERNEL_CU__ #define __Salsa20_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define ROTATE(v,c) (rotl32(v,c)) #define XOR(v,w) ((v) ^ (w)) #define PLUS(v,w) ((u32)((v) + (w))) #define PLUSONE(v) (PLUS((v),1)) #define SIGMA_0 0x61707865 #define SIGMA_1 0x3320646e #define SIGMA_2 0x79622d32 #define SIGMA_3 0x6b206574 #define TAU_0 0x61707865 #define TAU_1 0x3120646e #define TAU_2 0x79622d36 #define TAU_3 0x6b206574 __global__ void Salsa20_keyivsetup(u32* g_x, u32 *keys, u32 key_size, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 x0, x1, x2, x3, x4, x5, x10, x11, x12, x13, x14, x15; x1 = __mem(keys,tID,0,nr_streams); x2 = __mem(keys,tID,1,nr_streams); x3 = __mem(keys,tID,2,nr_streams); x4 = __mem(keys,tID,3,nr_streams); if(key_size==256) { x11 = __mem(keys,tID,4,nr_streams); x12 = __mem(keys,tID,5,nr_streams); x13 = __mem(keys,tID,6,nr_streams); x14 = __mem(keys,tID,7,nr_streams); x0 = SIGMA_0; x5 = SIGMA_1; x10 = SIGMA_2; x15 = SIGMA_3; } else { x11 = x1; x12 = x2; x13 = x3; x14 = x4; x0 = TAU_0; x5 = TAU_1; x10 = TAU_2; x15 = TAU_3; } __mem(g_x,tID, 0,nr_streams) = x0; __mem(g_x,tID, 1,nr_streams) = x1; __mem(g_x,tID, 2,nr_streams) = x2; __mem(g_x,tID, 3,nr_streams) = x3; __mem(g_x,tID, 4,nr_streams) = x4; __mem(g_x,tID, 5,nr_streams) = x5; if(iv_size>0) { __mem(g_x,tID, 6,nr_streams) = __mem(ivs,tID,0,nr_streams); __mem(g_x,tID, 7,nr_streams) = __mem(ivs,tID,1,nr_streams); } __mem(g_x,tID, 8,nr_streams) = 0; __mem(g_x,tID, 9,nr_streams) = 0; __mem(g_x,tID,10,nr_streams) = x10; __mem(g_x,tID,11,nr_streams) = x11; __mem(g_x,tID,12,nr_streams) = x12; __mem(g_x,tID,13,nr_streams) = x13; __mem(g_x,tID,14,nr_streams) = x14; __mem(g_x,tID,15,nr_streams) = x15; } #define print_all\ printf("%d:[0x%08x],[0x%08x]\n",tID,x0 ,input( 0));\ 
printf("%d:[0x%08x],[0x%08x]\n",tID,x1 ,input( 1));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x2 ,input( 2));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x3 ,input( 3));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x4 ,input( 4));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x5 ,input( 5));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x6 ,input( 6));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x7 ,input( 7));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x8 ,input( 8));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x9 ,input( 9));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x10,input(10));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x11,input(11));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x12,input(12));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x13,input(13));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x14,input(14));\ printf("%d:[0x%08x],[0x%08x]\n",tID,x15,input(15));\ printf("\n");\ #define SALSA20(x)\ do {\ int i;\ x0 = input( 0);\ x1 = input( 1);\ x2 = input( 2);\ x3 = input( 3);\ x4 = input( 4);\ x5 = input( 5);\ x6 = input( 6);\ x7 = input( 7);\ x8 = input( 8);\ x9 = input( 9);\ x10 = input(10);\ x11 = input(11);\ x12 = input(12);\ x13 = input(13);\ x14 = input(14);\ x15 = input(15);\ for (i = 20;i > 0;i -= 2) {\ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));\ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));\ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));\ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));\ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));\ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));\ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));\ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));\ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));\ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));\ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));\ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));\ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));\ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));\ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));\ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));\ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));\ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));\ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));\ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));\ x6 = XOR( 
x6,ROTATE(PLUS( x5, x4), 7));\ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));\ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));\ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));\ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));\ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));\ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));\ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));\ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));\ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));\ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));\ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));\ }\ x0 = PLUS( x0,input( 0));\ x1 = PLUS( x1,input( 1));\ x2 = PLUS( x2,input( 2));\ x3 = PLUS( x3,input( 3));\ x4 = PLUS( x4,input( 4));\ x5 = PLUS( x5,input( 5));\ x6 = PLUS( x6,input( 6));\ x7 = PLUS( x7,input( 7));\ x8 = PLUS( x8,input( 8));\ x9 = PLUS( x9,input( 9));\ x10 = PLUS(x10,input(10));\ x11 = PLUS(x11,input(11));\ x12 = PLUS(x12,input(12));\ x13 = PLUS(x13,input(13));\ x14 = PLUS(x14,input(14));\ x15 = PLUS(x15,input(15));\ if(!(input( 8) = PLUSONE(input( 8)))) {\ input( 9) = PLUSONE(input( 9));\ }\ } while(0) extern __shared__ __align__ (__alignof(void*)) u32 smem_cache[]; __global__ void Salsa20_process_blocks(gSTREAM_action act, u32* g_x, u32 *buff, u32 nr_blocks) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; #ifdef INPUT_SHMEM u32* input_csh=(u32*) smem_cache; #define input(idx) __mem(input_csh,threadIdx.x,(idx),blockDim.x) #else u32 i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15; #define input(idx) i##idx #endif /* load state */ input( 0) = __mem(g_x,tID, 0,nr_streams); input( 1) = __mem(g_x,tID, 1,nr_streams); input( 2) = __mem(g_x,tID, 2,nr_streams); input( 3) = __mem(g_x,tID, 3,nr_streams); input( 4) = __mem(g_x,tID, 4,nr_streams); input( 5) = __mem(g_x,tID, 5,nr_streams); input( 6) = __mem(g_x,tID, 6,nr_streams); input( 7) = __mem(g_x,tID, 7,nr_streams); input( 8) = __mem(g_x,tID, 8,nr_streams); input( 9) = __mem(g_x,tID, 
9,nr_streams); input(10) = __mem(g_x,tID,10,nr_streams); input(11) = __mem(g_x,tID,11,nr_streams); input(12) = __mem(g_x,tID,12,nr_streams); input(13) = __mem(g_x,tID,13,nr_streams); input(14) = __mem(g_x,tID,14,nr_streams); input(15) = __mem(g_x,tID,15,nr_streams); for(int block_no=0;block_no<nr_blocks;block_no++) { /* output of Salsa20 is x0 - x 15 */ SALSA20(x); /* copy/xor-into global buffer */ if(act!=GEN_KEYSTREAM) { __mem(buff,tID, 0,nr_streams) ^= x0; __mem(buff,tID, 1,nr_streams) ^= x1; __mem(buff,tID, 2,nr_streams) ^= x2; __mem(buff,tID, 3,nr_streams) ^= x3; __mem(buff,tID, 4,nr_streams) ^= x4; __mem(buff,tID, 5,nr_streams) ^= x5; __mem(buff,tID, 6,nr_streams) ^= x6; __mem(buff,tID, 7,nr_streams) ^= x7; __mem(buff,tID, 8,nr_streams) ^= x8; __mem(buff,tID, 9,nr_streams) ^= x9; __mem(buff,tID,10,nr_streams) ^= x10; __mem(buff,tID,11,nr_streams) ^= x11; __mem(buff,tID,12,nr_streams) ^= x12; __mem(buff,tID,13,nr_streams) ^= x13; __mem(buff,tID,14,nr_streams) ^= x14; __mem(buff,tID,15,nr_streams) ^= x15; } else { __mem(buff,tID, 0,nr_streams) = x0; __mem(buff,tID, 1,nr_streams) = x1; __mem(buff,tID, 2,nr_streams) = x2; __mem(buff,tID, 3,nr_streams) = x3; __mem(buff,tID, 4,nr_streams) = x4; __mem(buff,tID, 5,nr_streams) = x5; __mem(buff,tID, 6,nr_streams) = x6; __mem(buff,tID, 7,nr_streams) = x7; __mem(buff,tID, 8,nr_streams) = x8; __mem(buff,tID, 9,nr_streams) = x9; __mem(buff,tID,10,nr_streams) = x10; __mem(buff,tID,11,nr_streams) = x11; __mem(buff,tID,12,nr_streams) = x12; __mem(buff,tID,13,nr_streams) = x13; __mem(buff,tID,14,nr_streams) = x14; __mem(buff,tID,15,nr_streams) = x15; } buff+=16*nr_streams; } /* save state */ __mem(g_x,tID, 8,nr_streams) = input( 8); __mem(g_x,tID, 9,nr_streams) = input( 9); } #endif
b0704378685b37d01c5a53fdddc864c12202bb2e.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <cmath> #include <ctime> #include <cfloat> #include <algorithm> #include <chrono> #include <iomanip> #include <iostream> #include <map> #include <memory> #include <random> #include <sstream> #include <string> #include <vector> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <rocblas.h> #include <cudnn.h> #include "readubyte.h" /////////////////////////////////////////////////////////////////////////////////////////// // Definitions and helper utilities // Block width for CUDA kernels #define BW 128 #ifdef USE_GFLAGS #include <gflags/gflags.h> #ifndef _WIN32 #define gflags google #endif #else // Constant versions of gflags #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value) #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value) #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value) #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value) #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value)) #endif /** * Computes ceil(x / y) for integral nonnegative values. 
*/ static inline unsigned int RoundUp(unsigned int nominator, unsigned int denominator) { return (nominator + denominator - 1) / denominator; } /** * Saves a PGM grayscale image out of unsigned 8-bit data */ void SavePGMFile(const unsigned char *data, size_t width, size_t height, const char *filename) { FILE *fp = fopen(filename, "wb"); if (fp) { fprintf(fp, "P5\n%lu %lu\n255\n", width, height); fwrite(data, sizeof(unsigned char), width * height, fp); fclose(fp); } } ////////////////////////////////////////////////////////////////////////////// // Error handling // Adapted from the CUDNN classification code // sample: https://developer.nvidia.com/cuDNN #define FatalError(s) do { \ std::stringstream _where, _message; \ _where << __FILE__ << ':' << __LINE__; \ _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \ std::cerr << _message.str() << "\nAborting...\n"; \ hipDeviceReset(); \ exit(1); \ } while(0) #define checkCUDNN(status) do { \ std::stringstream _error; \ if (status != CUDNN_STATUS_SUCCESS) { \ _error << "CUDNN failure: " << cudnnGetErrorString(status); \ FatalError(_error.str()); \ } \ } while(0) #define checkCudaErrors(status) do { \ std::stringstream _error; \ if (status != 0) { \ _error << "Cuda failure: " << status; \ FatalError(_error.str()); \ } \ } while(0) /////////////////////////////////////////////////////////////////////////////////////////// // Command-line flags // Application parameters DEFINE_int32(gpu, 0, "The GPU ID to use"); DEFINE_int32(iterations, 1000, "Number of iterations for training"); DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)"); DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)"); // Batch parameters DEFINE_uint64(batch_size, 64, "Batch size for training"); // Filenames DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input"); DEFINE_bool(save_data, false, "Save pretrained weights to file"); 
DEFINE_string(train_images, "train-images-idx3-ubyte", "Training images filename"); DEFINE_string(train_labels, "train-labels-idx1-ubyte", "Training labels filename"); DEFINE_string(test_images, "t10k-images-idx3-ubyte", "Test images filename"); DEFINE_string(test_labels, "t10k-labels-idx1-ubyte", "Test labels filename"); // Solver parameters DEFINE_double(learning_rate, 0.01, "Base learning rate"); DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma"); DEFINE_double(lr_power, 0.75, "Learning rate policy power"); /////////////////////////////////////////////////////////////////////////////////////////// // Layer representations /** * Represents a convolutional layer with bias. */ struct ConvBiasLayer { int in_channels, out_channels, kernel_size; int in_width, in_height, out_width, out_height; std::vector<float> pconv, pbias; ConvBiasLayer(int in_channels_, int out_channels_, int kernel_size_, int in_w_, int in_h_) : pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_), pbias(out_channels_) { in_channels = in_channels_; out_channels = out_channels_; kernel_size = kernel_size_; in_width = in_w_; in_height = in_h_; out_width = in_w_ - kernel_size_ + 1; out_height = in_h_ - kernel_size_ + 1; } bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE 
*fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); } }; /** * Represents a max-pooling layer. */ struct MaxPoolLayer { int size, stride; MaxPoolLayer(int size_, int stride_) : size(size_), stride(stride_) {} }; /** * Represents a fully-connected neural network layer with bias. */ struct FullyConnectedLayer { int inputs, outputs; std::vector<float> pneurons, pbias; FullyConnectedLayer(int inputs_, int outputs_) : outputs(outputs_), inputs(inputs_), pneurons(inputs_ * outputs_), pbias(outputs_) {} bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), outputs, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE *fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), outputs, fp); fclose(fp); } }; 
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels

/**
 * Fills a floating-point array with ones.
 *
 * Launched with a 1D grid; one thread per element, guarded against the
 * partial block at the tail.
 *
 * @param vec The array to fill.
 * @param size The number of elements in the array.
 */
__global__ void FillOnes(float *vec, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;

    vec[idx] = 1.0f;
}

/**
 * Computes the backpropagation results of the Softmax loss for each result in a batch.
 * Uses the softmax values obtained from forward propagation to compute the difference.
 *
 * One thread per batch item. `diff` is expected to already contain the
 * softmax probabilities; this kernel subtracts the one-hot label
 * (softmax + cross-entropy gradient is p - onehot(label)).
 *
 * @param label The training batch label values.
 * @param num_labels The number of possible labels.
 * @param batch_size The size of the trained batch.
 * @param diff The resulting gradient.
 */
__global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= batch_size)
        return;

    const int label_value = static_cast<int>(label[idx]);

    // For each item in the batch, decrease the result of the label's value by 1
    diff[idx * num_labels + label_value] -= 1.0f;
}

///////////////////////////////////////////////////////////////////////////////////////////
// CUDNN/CUBLAS training context

// Bundles the cuDNN/cuBLAS handles, all tensor/filter/convolution descriptors
// and the selected convolution algorithms for the
// conv1-pool1-conv2-pool2-conv3-fc1-fc2 network on a single GPU.
struct TrainingContext
{
    cudnnHandle_t cudnnHandle;
    hipblasHandle_t cublasHandle;

    cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor, pool1Tensor, //data
                            conv2Tensor, conv2BiasTensor, conv3Tensor, conv3BiasTensor,
                            pool2Tensor, fc1Tensor, fc2Tensor;
    cudnnFilterDescriptor_t conv1filterDesc, conv2filterDesc, conv3filterDesc; //kernel
    cudnnConvolutionDescriptor_t conv1Desc, conv2Desc, conv3Desc;
    cudnnConvolutionFwdAlgo_t conv1algo, conv2algo, conv3algo;
    cudnnConvolutionBwdFilterAlgo_t conv1bwfalgo, conv2bwfalgo, conv3bwfalgo;
    cudnnConvolutionBwdDataAlgo_t conv2bwdalgo, conv3bwdalgo;
    cudnnPoolingDescriptor_t poolDesc;
    cudnnActivationDescriptor_t fc1Activation;

    int m_gpuid;             // device ordinal this context is bound to
    int m_batchSize;
    size_t m_workspaceSize;  // max cuDNN workspace bytes over all chosen algorithms

    FullyConnectedLayer&
    ref_fc1, &ref_fc2;  // references into the host-side FC layer descriptions

    // Disable copying
    TrainingContext& operator=(const TrainingContext&) = delete;
    TrainingContext(const TrainingContext&) = delete;

    // Creates handles and descriptors, sizes every tensor for the given batch
    // size, and queries cuDNN for the fastest forward/backward convolution
    // algorithms (accumulating the largest workspace requirement).
    TrainingContext(int gpuid, int batch_size,
                    ConvBiasLayer& conv1, MaxPoolLayer& pool1, ConvBiasLayer& conv2, MaxPoolLayer& pool2,
                    ConvBiasLayer& conv3,
                    FullyConnectedLayer& fc1, FullyConnectedLayer& fc2) : ref_fc1(fc1), ref_fc2(fc2), m_gpuid(gpuid)
    {
        m_batchSize = batch_size;

        // Create CUBLAS and CUDNN handles
        checkCudaErrors(hipSetDevice(gpuid));
        checkCudaErrors(hipblasCreate(&cublasHandle));
        checkCUDNN(cudnnCreate(&cudnnHandle));

        // Create tensor descriptors
        checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv1BiasTensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&pool1Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv2Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv2BiasTensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv3Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&conv3BiasTensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&pool2Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&fc1Tensor));
        checkCUDNN(cudnnCreateTensorDescriptor(&fc2Tensor));

        checkCUDNN(cudnnCreateActivationDescriptor(&fc1Activation));

        checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc));
        checkCUDNN(cudnnCreateFilterDescriptor(&conv2filterDesc));
        checkCUDNN(cudnnCreateFilterDescriptor(&conv3filterDesc));

        checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc));
        checkCUDNN(cudnnCreateConvolutionDescriptor(&conv2Desc));
        checkCUDNN(cudnnCreateConvolutionDescriptor(&conv3Desc));

        checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc));

        // Set tensor descriptor sizes
        checkCUDNN(cudnnSetTensor4dDescriptor(conv1BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              1, conv1.out_channels, 1, 1));
        checkCUDNN(cudnnSetTensor4dDescriptor(conv2BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              1, conv2.out_channels, 1, 1));
        checkCUDNN(cudnnSetTensor4dDescriptor(conv3BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              1, conv3.out_channels, 1, 1));

        // One shared pooling descriptor; NOTE(review): pool1 parameters are used
        // for both pooling stages — fine here only because pool1 and pool2 are
        // configured identically (2x2/stride 2) at the call site; confirm if
        // that ever changes.
        checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN,
                                               pool1.size, pool1.size, 0, 0,
                                               pool1.stride, pool1.stride));

        checkCUDNN(cudnnSetTensor4dDescriptor(pool2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              batch_size, conv2.out_channels,
                                              conv2.out_height / pool2.stride,
                                              conv2.out_width / pool2.stride));

        checkCUDNN(cudnnSetTensor4dDescriptor(fc1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              batch_size, fc1.outputs, 1, 1));

        checkCUDNN(cudnnSetTensor4dDescriptor(fc2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              batch_size, fc2.outputs, 1, 1));

        checkCUDNN(cudnnSetActivationDescriptor(fc1Activation, CUDNN_ACTIVATION_RELU,
                                                CUDNN_PROPAGATE_NAN, 0.0));

        // Set convolution tensor sizes and compute workspace size.
        // SetFwdConvolutionTensors also fills in the src/dst tensor
        // descriptors (including pool1Tensor when called for conv2).
        size_t workspace = 0;
        workspace = ::max(workspace, SetFwdConvolutionTensors(conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo));
        workspace = ::max(workspace, SetBwdConvolutionTensors(dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, &conv1bwfalgo, nullptr));

        workspace = ::max(workspace, SetFwdConvolutionTensors(conv2, pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, conv2algo));
        workspace = ::max(workspace, SetBwdConvolutionTensors(pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, &conv2bwfalgo, &conv2bwdalgo));

        workspace = ::max(workspace, SetFwdConvolutionTensors(conv3, pool2Tensor, conv3Tensor, conv3filterDesc, conv3Desc, conv3algo));
        workspace = ::max(workspace, SetBwdConvolutionTensors(pool2Tensor, conv3Tensor, conv3filterDesc, conv3Desc, &conv3bwfalgo, &conv3bwdalgo));

        // The workspace is allocated later (if necessary)
        m_workspaceSize = workspace;
    }

    // Releases every handle/descriptor created by the constructor.
    ~TrainingContext()
    {
        checkCudaErrors(hipSetDevice(m_gpuid));
        checkCudaErrors(hipblasDestroy(cublasHandle));
        checkCUDNN(cudnnDestroy(cudnnHandle));
        checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv1BiasTensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(pool1Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv2Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv2BiasTensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(pool2Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv3Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(conv3BiasTensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(fc1Tensor));
        checkCUDNN(cudnnDestroyTensorDescriptor(fc2Tensor));
        checkCUDNN(cudnnDestroyActivationDescriptor(fc1Activation));
        checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc));
        checkCUDNN(cudnnDestroyFilterDescriptor(conv2filterDesc));
        checkCUDNN(cudnnDestroyFilterDescriptor(conv3filterDesc));
        checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc));
        checkCUDNN(cudnnDestroyConvolutionDescriptor(conv2Desc));
        checkCUDNN(cudnnDestroyConvolutionDescriptor(conv3Desc));
        checkCUDNN(cudnnDestroyPoolingDescriptor(poolDesc));
    }

    /**
     * Configures the src/filter/conv/dst descriptors for one convolutional
     * layer (batch = m_batchSize, NCHW float, zero padding, unit stride),
     * queries cuDNN for the fastest forward algorithm, and returns its
     * workspace requirement in bytes.
     */
    size_t SetFwdConvolutionTensors(ConvBiasLayer& conv, cudnnTensorDescriptor_t& srcTensorDesc,
                                    cudnnTensorDescriptor_t& dstTensorDesc,
                                    cudnnFilterDescriptor_t& filterDesc,
                                    cudnnConvolutionDescriptor_t& convDesc,
                                    cudnnConvolutionFwdAlgo_t& algo)
    {
        size_t sizeInBytes = 0;

        int n = m_batchSize;
        int c = conv.in_channels;
        int h = conv.in_height;
        int w = conv.in_width;

        checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              n, c, h, w));

        checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                                              conv.out_channels, conv.in_channels,
                                              conv.kernel_size, conv.kernel_size));

        // cuDNN 6+ takes the compute datatype as an extra argument.
#if CUDNN_MAJOR > 5
        checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1,
                                                   CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
#else
        checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1,
                                                   CUDNN_CROSS_CORRELATION));
#endif

        // Find dimension of convolution output
        checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc, srcTensorDesc, filterDesc,
                                                         &n, &c, &h, &w));

        checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                              n, c, h, w));
        checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, srcTensorDesc, filterDesc,
                                                       convDesc, dstTensorDesc,
                                                       CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
                                                       0, &algo));

        checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, srcTensorDesc, filterDesc,
                                                           convDesc, dstTensorDesc, algo,
                                                           &sizeInBytes));

        return sizeInBytes;
    }

    /**
     * Runs one forward pass through
     * conv1 -> pool1 -> conv2 -> pool2 -> conv3 -> fc1 -> relu -> fc2 -> softmax.
     * All pointers are device buffers; `workspace` must be at least
     * m_workspaceSize bytes; `onevec` is a length-m_batchSize vector of ones
     * used to broadcast the FC biases via GEMM.
     */
    void ForwardPropagation(float *data, float *conv1, float *pool1, float *conv2, float *pool2,
                            float *conv3,
                            float *fc1, float *fc1relu, float *fc2, float *result,
                            float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias,
                            float *pconv3, float *pconv3bias,
                            float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias,
                            void *workspace, float *onevec)
    {
        float alpha = 1.0f, beta = 0.0f;

        checkCudaErrors(hipSetDevice(m_gpuid));

        // Conv1 layer
        checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data,
                                           conv1filterDesc, pconv1, conv1Desc, conv1algo,
                                           workspace, m_workspaceSize, &beta, conv1Tensor, conv1));
        checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1BiasTensor, pconv1bias,
                                  &alpha, conv1Tensor, conv1));

        // Pool1 layer
        checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv1Tensor, conv1,
                                       &beta, pool1Tensor, pool1));

        // Conv2 layer
        checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool1Tensor, pool1,
                                           conv2filterDesc, pconv2, conv2Desc, conv2algo,
                                           workspace, m_workspaceSize, &beta, conv2Tensor, conv2));
        checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv2BiasTensor, pconv2bias,
                                  &alpha, conv2Tensor, conv2));

        // Pool2 layer
        checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv2Tensor, conv2,
                                       &beta, pool2Tensor, pool2));

        // Conv3 layer
        checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool2Tensor, pool2,
                                           conv3filterDesc, pconv3, conv3Desc, conv3algo,
                                           workspace, m_workspaceSize, &beta, conv3Tensor, conv3));
        checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha,
                                  conv3BiasTensor, pconv3bias, &alpha, conv3Tensor, conv3));

        // FC1 layer
        // Forward propagate neurons using weights (fc1 = pfc1'*conv3)
        // (fc1's input is the conv3 activation, not pool2 — see /*pool2*/ below)
        checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
                                     ref_fc1.outputs, m_batchSize, ref_fc1.inputs,
                                     &alpha,
                                     pfc1, ref_fc1.inputs,
                                     /*pool2*/conv3, ref_fc1.inputs,
                                     &beta,
                                     fc1, ref_fc1.outputs));
        // Add bias using GEMM's "beta" (fc1 += pfc1bias*1_vec')
        checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                     ref_fc1.outputs, m_batchSize, 1,
                                     &alpha,
                                     pfc1bias, ref_fc1.outputs,
                                     onevec, 1,
                                     &alpha,
                                     fc1, ref_fc1.outputs));

        // ReLU activation
        checkCUDNN(cudnnActivationForward(cudnnHandle, fc1Activation, &alpha,
                                          fc1Tensor, fc1, &beta, fc1Tensor, fc1relu));

        // FC2 layer
        // Forward propagate neurons using weights (fc2 = pfc2'*fc1relu)
        checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
                                     ref_fc2.outputs, m_batchSize, ref_fc2.inputs,
                                     &alpha,
                                     pfc2, ref_fc2.inputs,
                                     fc1relu, ref_fc2.inputs,
                                     &beta,
                                     fc2, ref_fc2.outputs));
        // Add bias using GEMM's "beta" (fc2 += pfc2bias*1_vec')
        checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                     ref_fc2.outputs, m_batchSize, 1,
                                     &alpha,
                                     pfc2bias, ref_fc2.outputs,
                                     onevec, 1,
                                     &alpha,
                                     fc2, ref_fc2.outputs));

        // Softmax loss
        checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
                                       &alpha, fc2Tensor, fc2, &beta, fc2Tensor, result));
    }

    /**
     * Queries cuDNN for the fastest backward-filter and (optionally)
     * backward-data algorithms for one convolution, returning the larger of
     * the two workspace requirements. Pass nullptr for an algorithm that is
     * not needed (e.g. no backward-data for the first layer).
     */
    size_t SetBwdConvolutionTensors(cudnnTensorDescriptor_t& srcTensorDesc,
                                    cudnnTensorDescriptor_t& dstTensorDesc,
                                    cudnnFilterDescriptor_t& filterDesc,
                                    cudnnConvolutionDescriptor_t& convDesc,
                                    cudnnConvolutionBwdFilterAlgo_t *falgo,
                                    cudnnConvolutionBwdDataAlgo_t *dalgo)
    {
        size_t sizeInBytes = 0, tmpsize = 0;

        // If backprop filter algorithm was requested
        if (falgo)
        {
            checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(
                cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc,
                CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, falgo));

            checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(
                cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc,
                *falgo, &tmpsize));

            sizeInBytes = ::max(sizeInBytes, tmpsize);
        }

        // If backprop data algorithm was requested
        if (dalgo)
        {
            checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(
                cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc,
                CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, dalgo));

            checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(
                cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc,
                *dalgo, &tmpsize));

            sizeInBytes = ::max(sizeInBytes, tmpsize);
        }

        return sizeInBytes;
    }

    /**
     * Runs one backward pass, producing parameter gradients (g*) and
     * data differentials (d*) for every layer, scaled by 1/batch_size.
     *
     * NOTE(review): callers must pass the gradient/differential buffers in
     * exactly this order — (gconv2, gconv2bias, dconv2, dpool2, gconv3,
     * gconv3bias, dconv3). The invocation visible in main() appears to pass
     * d_gconv3/d_gconv3bias where dconv2/dpool2 are expected — verify the
     * call site.
     */
    void Backpropagation(ConvBiasLayer& layer_conv1, MaxPoolLayer& layer_pool1,
                         ConvBiasLayer& layer_conv2, MaxPoolLayer& layer_pool2,
                         ConvBiasLayer& layer_conv3,
                         float *data, float *labels, float *conv1, float *pool1, float *conv2,
                         float *pool2, float *conv3, float *fc1, float *fc1relu, float *fc2,
                         float *fc2smax, float *dloss_data,
                         float *pconv1, float *pconv1bias,
                         float *pconv2, float *pconv2bias,
                         float *pconv3, float *pconv3bias,
                         float *pfc1, float *pfc1bias,
                         float *pfc2, float *pfc2bias,
                         float *gconv1, float *gconv1bias, float *dpool1,
                         float *gconv2, float *gconv2bias, float *dconv2, float *dpool2,
                         float *gconv3, float *gconv3bias, float *dconv3,
                         float *gfc1, float *gfc1bias, float *dfc1, float *dfc1relu,
                         float *gfc2, float *gfc2bias, float *dfc2,
                         void *workspace, float *onevec)
    {
        float alpha = 1.0f, beta = 0.0f;

        // Average gradients over the batch in the SGD update.
        float scalVal = 1.0f / static_cast<float>(m_batchSize);

        checkCudaErrors(hipSetDevice(m_gpuid));

        // Initialization (using the training error function)
        checkCudaErrors(hipMemcpyAsync(dloss_data, fc2smax,
                                       sizeof(float) * m_batchSize * ref_fc2.outputs,
                                       hipMemcpyDeviceToDevice));

        // Softmax layer
        hipLaunchKernelGGL(( SoftmaxLossBackprop), dim3(RoundUp(m_batchSize, BW)), dim3(BW), 0, 0, labels, ref_fc2.outputs, m_batchSize, dloss_data);

        // Accounting for batch size in SGD
        checkCudaErrors(hipblasSscal(cublasHandle, ref_fc2.outputs * m_batchSize,
&scalVal, dloss_data, 1)); // FC2 layer // Compute derivative with respect to weights: gfc2 = (fc1relu * dfc2smax') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, ref_fc2.inputs, ref_fc2.outputs, m_batchSize, &alpha, fc1relu, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, gfc2, ref_fc2.inputs)); // Compute derivative with respect to bias: gfc2bias = dfc2smax * 1_vec checkCudaErrors(hipblasSgemv(cublasHandle, HIPBLAS_OP_N, ref_fc2.outputs, m_batchSize, &alpha, dloss_data, ref_fc2.outputs, onevec, 1, &beta, gfc2bias, 1)); // Compute derivative with respect to data (for previous layer): pfc2*dfc2smax (500x10*10xN) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc2.inputs, m_batchSize, ref_fc2.outputs, &alpha, pfc2, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, dfc2, ref_fc2.inputs)); // ReLU activation checkCUDNN(cudnnActivationBackward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1relu, fc1Tensor, dfc2, fc1Tensor, fc1, &beta, fc1Tensor, dfc1relu)); // FC1 layer // Compute derivative with respect to weights: gfc1 = (pool2 * dfc1relu') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, ref_fc1.inputs, ref_fc1.outputs, m_batchSize, &alpha, pool2, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, gfc1, ref_fc1.inputs)); // Compute derivative with respect to bias: gfc1bias = dfc1relu * 1_vec checkCudaErrors(hipblasSgemv(cublasHandle, HIPBLAS_OP_N, ref_fc1.outputs, m_batchSize, &alpha, dfc1relu, ref_fc1.outputs, onevec, 1, &beta, gfc1bias, 1)); // Compute derivative with respect to data (for previous layer): pfc1*dfc1relu (800x500*500xN) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc1.inputs, m_batchSize, ref_fc1.outputs, &alpha, pfc1, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, dfc1, ref_fc1.inputs)); // Conv3 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv3Tensor, dfc1, &beta, conv3BiasTensor, gconv3bias)); 
        // conv3's input is pool2, so its filter gradient correctly uses pool2.
        checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool2Tensor, pool2,
                                                  conv3Tensor, dfc1, conv3Desc, conv3bwfalgo,
                                                  workspace, m_workspaceSize,
                                                  &beta, conv3filterDesc, gconv3));

        checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv3filterDesc, pconv3,
                                                conv3Tensor, dfc1, conv3Desc, conv3bwdalgo,
                                                workspace, m_workspaceSize,
                                                &beta, pool2Tensor, dconv3));

        // Pool2 layer
        checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha,
                                        pool2Tensor, pool2, pool2Tensor, /*dfc1*/dconv3,
                                        conv2Tensor, conv2, &beta, conv2Tensor, dpool2));

        // Conv2 layer
        checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv2Tensor, dpool2,
                                                &beta, conv2BiasTensor, gconv2bias));

        checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool1Tensor, pool1,
                                                  conv2Tensor, dpool2, conv2Desc, conv2bwfalgo,
                                                  workspace, m_workspaceSize,
                                                  &beta, conv2filterDesc, gconv2));

        checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv2filterDesc, pconv2,
                                                conv2Tensor, dpool2, conv2Desc, conv2bwdalgo,
                                                workspace, m_workspaceSize,
                                                &beta, pool1Tensor, dconv2));

        // Pool1 layer
        checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha,
                                        pool1Tensor, pool1, pool1Tensor, dconv2,
                                        conv1Tensor, conv1, &beta, conv1Tensor, dpool1));

        // Conv1 layer
        checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1Tensor, dpool1,
                                                &beta, conv1BiasTensor, gconv1bias));

        checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, dataTensor, data,
                                                  conv1Tensor, dpool1, conv1Desc, conv1bwfalgo,
                                                  workspace, m_workspaceSize,
                                                  &beta, conv1filterDesc, gconv1));

        // No need for convBackwardData because there are no more layers below
    }

    /**
     * Applies one plain-SGD step: p += (-learning_rate) * g for every
     * parameter buffer, via SAXPY. Gradients are already batch-averaged by
     * Backpropagation.
     */
    void UpdateWeights(float learning_rate,
                       ConvBiasLayer& conv1, ConvBiasLayer& conv2, ConvBiasLayer &conv3,
                       float *pconv1, float *pconv1bias,
                       float *pconv2, float *pconv2bias,
                       float *pconv3, float *pconv3bias,
                       float *pfc1, float *pfc1bias,
                       float *pfc2, float *pfc2bias,
                       float *gconv1, float *gconv1bias,
                       float *gconv2, float *gconv2bias,
                       float *gconv3, float *gconv3bias,
                       float *gfc1, float *gfc1bias,
                       float *gfc2, float *gfc2bias)
    {
        float alpha = -learning_rate;

        checkCudaErrors(hipSetDevice(m_gpuid));

        // Conv1
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv1.pconv.size()),
                                     &alpha, gconv1, 1, pconv1, 1));
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv1.pbias.size()),
                                     &alpha, gconv1bias, 1, pconv1bias, 1));

        // Conv2
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv2.pconv.size()),
                                     &alpha, gconv2, 1, pconv2, 1));
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv2.pbias.size()),
                                     &alpha, gconv2bias, 1, pconv2bias, 1));

        // Conv3
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv3.pconv.size()),
                                     &alpha, gconv3, 1, pconv3, 1));
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv3.pbias.size()),
                                     &alpha, gconv3bias, 1, pconv3bias, 1));

        // Fully connected 1
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pneurons.size()),
                                     &alpha, gfc1, 1, pfc1, 1));
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pbias.size()),
                                     &alpha, gfc1bias, 1, pfc1bias, 1));

        // Fully connected 2
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pneurons.size()),
                                     &alpha, gfc2, 1, pfc2, 1));
        checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pbias.size()),
                                     &alpha, gfc2bias, 1, pfc2bias, 1));
    }
};


///////////////////////////////////////////////////////////////////////////////////////////
// Main function


int main(int argc, char **argv)
{
#ifdef USE_GFLAGS
    gflags::ParseCommandLineFlags(&argc, &argv, true);
#endif

    size_t width, height, channels = 1;

    // Open input data
    printf("Reading input data\n");

    // Read dataset sizes
    size_t train_size = ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height);
    size_t test_size = ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height);
    if (train_size == 0)
        return 1;
std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size); std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size); // Read data from datasets if (ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size) return 2; if (ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size) return 3; printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size); printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations); // This code snippet saves a random image and its label /* std::random_device rd_image; int random_image = rd_image() % train_size; std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm"; SavePGMFile(&train_images[0] + random_image * width*height*channels, width, height, ss.str().c_str()); */ // Choose GPU int num_gpus; checkCudaErrors(hipGetDeviceCount(&num_gpus)); if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus) { printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", FLAGS_gpu, num_gpus); return 4; } // Create the LeNet network architecture ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height); MaxPoolLayer pool1(2, 2); ConvBiasLayer conv2(conv1.out_channels, 30, 3, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride); MaxPoolLayer pool2(2, 2); ConvBiasLayer conv3(conv2.out_channels, 70, 3, conv2.out_width / pool1.stride , conv2.out_height/pool2.stride ); FullyConnectedLayer fc1((conv3.out_channels*conv3.out_width*conv3.out_height) , 500); //FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) , 500); FullyConnectedLayer fc2(fc1.outputs, 10); // Initialize CUDNN/CUBLAS training context TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, conv3, fc1, 
fc2); // Determine initial network structure bool bRet = true; if (FLAGS_pretrained) { bRet = conv1.FromFile("conv1"); bRet &= conv2.FromFile("conv2"); bRet &= fc1.FromFile("ip1"); bRet &= fc2.FromFile("ip2"); } if (!bRet || !FLAGS_pretrained) { // Create random network std::random_device rd; std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // Xavier weight filling float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels)); std::uniform_real_distribution<> dconv1(-wconv1, wconv1); float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels)); std::uniform_real_distribution<> dconv2(-wconv2, wconv2); float wconv3 = sqrt(3.0f / (conv3.kernel_size * conv3.kernel_size * conv3.in_channels)); std::uniform_real_distribution<> dconv3(-wconv3, wconv3); float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs)); std::uniform_real_distribution<> dfc1(-wfc1, wfc1); float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs)); std::uniform_real_distribution<> dfc2(-wfc2, wfc2); // Randomize network for (auto&& iter : conv1.pconv) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv1.pbias) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv2.pconv) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv2.pbias) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv3.pconv) iter = static_cast<float>(dconv3(gen)); for (auto&& iter : conv3.pbias) iter = static_cast<float>(dconv3(gen)); for (auto&& iter : fc1.pneurons) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc1.pbias) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc2.pneurons) iter = static_cast<float>(dfc2(gen)); for (auto&& iter : fc2.pbias) iter = static_cast<float>(dfc2(gen)); } ///////////////////////////////////////////////////////////////////////////// // Create GPU data structures // Forward propagation data float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, 
*d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax; float *d_conv3; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(hipMalloc(&d_data, sizeof(float) * context.m_batchSize * channels * height * width)); checkCudaErrors(hipMalloc(&d_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1)); checkCudaErrors(hipMalloc(&d_conv1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(hipMalloc(&d_pool1, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(hipMalloc(&d_conv2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(hipMalloc(&d_pool2, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride))); checkCudaErrors(hipMalloc(&d_conv3, sizeof(float) * context.m_batchSize * conv3.out_channels * conv3.out_height * conv3.out_width)); checkCudaErrors(hipMalloc(&d_fc1, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_fc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_fc2, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(hipMalloc(&d_fc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); // Network parameters float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias, *d_pconv3, *d_pconv3bias; float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias; checkCudaErrors(hipMalloc(&d_pconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(hipMalloc(&d_pconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(hipMalloc(&d_pconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(hipMalloc(&d_pconv2bias, sizeof(float) * conv2.pbias.size())); 
checkCudaErrors(hipMalloc(&d_pconv3, sizeof(float) * conv3.pconv.size())); checkCudaErrors(hipMalloc(&d_pconv3bias, sizeof(float) * conv3.pbias.size())); checkCudaErrors(hipMalloc(&d_pfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(hipMalloc(&d_pfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(hipMalloc(&d_pfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(hipMalloc(&d_pfc2bias, sizeof(float) * fc2.pbias.size())); // Network parameter gradients float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias, *d_gconv3, *d_gconv3bias; float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias; checkCudaErrors(hipMalloc(&d_gconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(hipMalloc(&d_gconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(hipMalloc(&d_gconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(hipMalloc(&d_gconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(hipMalloc(&d_gconv3, sizeof(float) * conv3.pconv.size())); checkCudaErrors(hipMalloc(&d_gconv3bias, sizeof(float) * conv3.pbias.size())); checkCudaErrors(hipMalloc(&d_gfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(hipMalloc(&d_gfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(hipMalloc(&d_gfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(hipMalloc(&d_gfc2bias, sizeof(float) * fc2.pbias.size())); // Differentials w.r.t. 
data float *d_dpool1, *d_dpool2, *d_dconv2, *d_dconv3, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(hipMalloc(&d_dpool1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(hipMalloc(&d_dpool2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(hipMalloc(&d_dconv2, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(hipMalloc(&d_dconv3, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(hipMalloc(&d_dfc1, sizeof(float) * context.m_batchSize * fc1.inputs)); checkCudaErrors(hipMalloc(&d_dfc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_dfc2, sizeof(float) * context.m_batchSize * fc2.inputs)); checkCudaErrors(hipMalloc(&d_dfc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(hipMalloc(&d_dlossdata,sizeof(float) * context.m_batchSize * fc2.outputs)); // Temporary buffers and workspaces float *d_onevec; void *d_cudnn_workspace = nullptr; checkCudaErrors(hipMalloc(&d_onevec, sizeof(float)* context.m_batchSize)); if (context.m_workspaceSize > 0) checkCudaErrors(hipMalloc(&d_cudnn_workspace, context.m_workspaceSize)); ///////////////////////////////////////////////////////////////////////////// // Copy initial network to device checkCudaErrors(hipMemcpyAsync(d_pconv1, &conv1.pconv[0], sizeof(float) * conv1.pconv.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv1bias, &conv1.pbias[0], sizeof(float) * conv1.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv2, &conv2.pconv[0], 
sizeof(float) * conv2.pconv.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv2bias, &conv2.pbias[0], sizeof(float) * conv2.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv3, &conv3.pconv[0], sizeof(float) * conv3.pconv.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv3bias, &conv3.pbias[0], sizeof(float) * conv3.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc1, &fc1.pneurons[0], sizeof(float) * fc1.pneurons.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc1bias, &fc1.pbias[0], sizeof(float) * fc1.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc2, &fc2.pneurons[0], sizeof(float) * fc2.pneurons.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc2bias, &fc2.pbias[0], sizeof(float) * fc2.pbias.size(), hipMemcpyHostToDevice)); // Fill one-vector with ones hipLaunchKernelGGL(( FillOnes), dim3(RoundUp(context.m_batchSize, BW)), dim3(BW), 0, 0, d_onevec, context.m_batchSize); printf("Preparing dataset\n"); // Normalize training set to be in [0,1] std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size); for (size_t i = 0; i < train_size * channels * width * height; ++i) train_images_float[i] = (float)train_images[i] / 255.0f; for (size_t i = 0; i < train_size; ++i) train_labels_float[i] = (float)train_labels[i]; printf("Training...\n"); // Use SGD to train the network checkCudaErrors(hipDeviceSynchronize()); auto t1 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < FLAGS_iterations; ++iter) { // Train int imageid = iter % (train_size / context.m_batchSize); // Prepare current batch on device checkCudaErrors(hipMemcpyAsync(d_data, &train_images_float[imageid * context.m_batchSize * width*height*channels], sizeof(float) * context.m_batchSize * channels * width * height, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_labels, 
&train_labels_float[imageid * context.m_batchSize], sizeof(float) * context.m_batchSize, hipMemcpyHostToDevice)); // Forward propagation context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Backward propagation context.Backpropagation(conv1, pool1, conv2, pool2, conv3, d_data, d_labels, d_conv1, d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_dlossdata, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_dpool1, d_gconv2, d_gconv2bias, d_gconv3, d_gconv3bias, d_dconv2, d_dpool2, d_dconv3, d_gfc1, d_gfc1bias, d_dfc1, d_dfc1relu, d_gfc2, d_gfc2bias, d_dfc2, d_cudnn_workspace, d_onevec); // Compute learning rate float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power))); // Update weights context.UpdateWeights(learningRate, conv1, conv2, conv3, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_gconv2, d_gconv2bias, d_gconv3, d_gconv3bias, d_gfc1, d_gfc1bias, d_gfc2, d_gfc2bias); } checkCudaErrors(hipDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations); if (FLAGS_save_data) { // Copy trained weights from GPU to CPU checkCudaErrors(hipMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv1.pbias[0], d_pconv1bias, sizeof(float) * conv1.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv2.pconv[0], d_pconv2, sizeof(float) * conv2.pconv.size(), hipMemcpyDeviceToHost)); 
checkCudaErrors(hipMemcpy(&conv2.pbias[0], d_pconv2bias, sizeof(float) * conv2.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv3.pconv[0], d_pconv3, sizeof(float) * conv3.pconv.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv3.pbias[0], d_pconv3bias, sizeof(float) * conv3.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc1.pneurons[0], d_pfc1, sizeof(float) * fc1.pneurons.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc1.pbias[0], d_pfc1bias, sizeof(float) * fc1.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc2.pneurons[0], d_pfc2, sizeof(float) * fc2.pneurons.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc2.pbias[0], d_pfc2bias, sizeof(float) * fc2.pbias.size(), hipMemcpyDeviceToHost)); // Now save data printf("Saving data to file\n"); conv1.ToFile("conv1"); conv2.ToFile("conv2"); fc1.ToFile("ip1"); fc2.ToFile("ip2"); } float classification_error = 1.0f; int classifications = FLAGS_classify; if (classifications < 0) classifications = (int)test_size; // Test the resulting neural network's classification if (classifications > 0) { // Initialize a TrainingContext structure for testing (different batch size) TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, conv3, fc1, fc2); // Ensure correct workspaceSize is allocated for testing if (context.m_workspaceSize < test_context.m_workspaceSize) { checkCudaErrors(hipFree(d_cudnn_workspace)); checkCudaErrors(hipMalloc(&d_cudnn_workspace, test_context.m_workspaceSize)); } int num_errors = 0; for (int i = 0; i < classifications; ++i) { std::vector<float> data(width * height); // Normalize image to be in [0,1] for (int j = 0; j < width * height; ++j) data[j] = (float)test_images[i * width*height*channels + j] / 255.0f; checkCudaErrors(hipMemcpyAsync(d_data, &data[0], sizeof(float) * width * height, hipMemcpyHostToDevice)); // Forward propagate test image test_context.ForwardPropagation(d_data, d_conv1, 
d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Perform classification std::vector<float> class_vec(10); // Copy back result checkCudaErrors(hipMemcpy(&class_vec[0], d_fc2smax, sizeof(float) * 10, hipMemcpyDeviceToHost)); // Determine classification according to maximal response int chosen = 0; for (int id = 1; id < 10; ++id) { if (class_vec[chosen] < class_vec[id]) chosen = id; } if (chosen != test_labels[i]) ++num_errors; } classification_error = (float)num_errors / (float)classifications; printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications); } // Free data structures checkCudaErrors(hipFree(d_data)); checkCudaErrors(hipFree(d_conv1)); checkCudaErrors(hipFree(d_pool1)); checkCudaErrors(hipFree(d_conv2)); checkCudaErrors(hipFree(d_pool2)); checkCudaErrors(hipFree(d_conv3)); checkCudaErrors(hipFree(d_fc1)); checkCudaErrors(hipFree(d_fc2)); checkCudaErrors(hipFree(d_pconv1)); checkCudaErrors(hipFree(d_pconv1bias)); checkCudaErrors(hipFree(d_pconv2)); checkCudaErrors(hipFree(d_pconv2bias)); checkCudaErrors(hipFree(d_pconv3)); checkCudaErrors(hipFree(d_pconv3bias)); checkCudaErrors(hipFree(d_pfc1)); checkCudaErrors(hipFree(d_pfc1bias)); checkCudaErrors(hipFree(d_pfc2)); checkCudaErrors(hipFree(d_pfc2bias)); checkCudaErrors(hipFree(d_gconv1)); checkCudaErrors(hipFree(d_gconv1bias)); checkCudaErrors(hipFree(d_gconv2)); checkCudaErrors(hipFree(d_gconv2bias)); checkCudaErrors(hipFree(d_gconv3)); checkCudaErrors(hipFree(d_gconv3bias)); checkCudaErrors(hipFree(d_gfc1)); checkCudaErrors(hipFree(d_gfc1bias)); checkCudaErrors(hipFree(d_dfc1)); checkCudaErrors(hipFree(d_gfc2)); checkCudaErrors(hipFree(d_gfc2bias)); checkCudaErrors(hipFree(d_dfc2)); checkCudaErrors(hipFree(d_dpool1)); checkCudaErrors(hipFree(d_dconv2)); 
checkCudaErrors(hipFree(d_dpool2)); checkCudaErrors(hipFree(d_dconv3)); checkCudaErrors(hipFree(d_labels)); checkCudaErrors(hipFree(d_dlossdata)); checkCudaErrors(hipFree(d_onevec)); if (d_cudnn_workspace != nullptr) checkCudaErrors(hipFree(d_cudnn_workspace)); return 0; }
b0704378685b37d01c5a53fdddc864c12202bb2e.cu
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <cfloat>

#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <vector>

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include <cublas_v2.h>
#include <cudnn.h>

#include "readubyte.h"

///////////////////////////////////////////////////////////////////////////////////////////
// Definitions and helper utilities

// Block width for CUDA kernels (host-side launch configuration; grid sizes are
// computed from this with RoundUp below)
#define BW 128

#ifdef USE_GFLAGS
    #include <gflags/gflags.h>

    #ifndef _WIN32
        #define gflags google
    #endif
#else
    // Constant versions of gflags: when gflags is unavailable, each DEFINE_*
    // expands to a compile-time constant FLAGS_<name>, so the rest of the code
    // can read FLAGS_* uniformly (command-line overriding is simply disabled).
    #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
    #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
    #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
    #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
    #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif

/**
 * Computes ceil(x / y) for integral nonnegative values.
 */
static inline unsigned int RoundUp(unsigned int nominator, unsigned int denominator)
{
    // NOTE(review): assumes nominator + denominator - 1 does not overflow
    // unsigned int — holds for the grid-size computations in this file.
    return (nominator + denominator - 1) / denominator;
}

/**
 * Saves a PGM grayscale image out of unsigned 8-bit data
 * (binary "P5" format: header line, then width*height raw bytes).
 * Silently does nothing if the file cannot be opened.
 */
void SavePGMFile(const unsigned char *data, size_t width, size_t height, const char *filename)
{
    FILE *fp = fopen(filename, "wb");
    if (fp)
    {
        fprintf(fp, "P5\n%lu %lu\n255\n", width, height);
        fwrite(data, sizeof(unsigned char), width * height, fp);
        fclose(fp);
    }
}

//////////////////////////////////////////////////////////////////////////////
// Error handling
// Adapted from the CUDNN classification code
// sample: https://developer.nvidia.com/cuDNN

// Prints the message with file/line context, resets the device and aborts the
// process. Used as the terminal action of the two check macros below.
#define FatalError(s) do {                                             \
    std::stringstream _where, _message;                                \
    _where << __FILE__ << ':' << __LINE__;                             \
    _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__;  \
    std::cerr << _message.str() << "\nAborting...\n";                  \
    cudaDeviceReset();                                                 \
    exit(1);                                                           \
} while(0)

// Fatal-checks a cudnnStatus_t.
#define checkCUDNN(status) do {                                        \
    std::stringstream _error;                                          \
    if (status != CUDNN_STATUS_SUCCESS) {                              \
      _error << "CUDNN failure: " << cudnnGetErrorString(status);      \
      FatalError(_error.str());                                        \
    }                                                                  \
} while(0)

// Fatal-checks any status that uses 0 for success; in this file it is applied
// to both cudaError_t and cublasStatus_t return values.
#define checkCudaErrors(status) do {                                   \
    std::stringstream _error;                                          \
    if (status != 0) {                                                 \
      _error << "Cuda failure: " << status;                            \
      FatalError(_error.str());                                        \
    }                                                                  \
} while(0)

///////////////////////////////////////////////////////////////////////////////////////////
// Command-line flags

// Application parameters
DEFINE_int32(gpu, 0, "The GPU ID to use");
DEFINE_int32(iterations, 1000, "Number of iterations for training");
DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)");
DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)");

// Batch parameters
DEFINE_uint64(batch_size, 64, "Batch size for training");

// Filenames
DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input");
DEFINE_bool(save_data, false, "Save pretrained weights to file");
DEFINE_string(train_images, "train-images-idx3-ubyte", "Training images filename");
DEFINE_string(train_labels, "train-labels-idx1-ubyte", "Training labels filename");
DEFINE_string(test_images, "t10k-images-idx3-ubyte", "Test images filename");
DEFINE_string(test_labels, "t10k-labels-idx1-ubyte", "Test labels filename");

// Solver parameters
DEFINE_double(learning_rate, 0.01, "Base learning rate");
DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma");
DEFINE_double(lr_power, 0.75, "Learning rate policy power");

///////////////////////////////////////////////////////////////////////////////////////////
// Layer representations

/**
 * Represents a convolutional layer with bias.
 * Host-side parameter container only: pconv holds the filter weights
 * (out_channels x in_channels x kernel_size x kernel_size) and pbias one value
 * per output channel. The device copies are managed by the caller.
 */
struct ConvBiasLayer
{
    int in_channels, out_channels, kernel_size;
    int in_width, in_height, out_width, out_height;

    std::vector<float> pconv, pbias;

    /**
     * Valid (no padding, stride 1) convolution: output dimensions shrink by
     * kernel_size - 1 relative to the input.
     */
    ConvBiasLayer(int in_channels_, int out_channels_, int kernel_size_, int in_w_, int in_h_)
        : pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_),
          pbias(out_channels_)
    {
        in_channels = in_channels_;
        out_channels = out_channels_;
        kernel_size = kernel_size_;
        in_width = in_w_;
        in_height = in_h_;
        out_width = in_w_ - kernel_size_ + 1;
        out_height = in_h_ - kernel_size_ + 1;
    }

    /**
     * Loads weights from "<fileprefix>.bin" and biases from
     * "<fileprefix>.bias.bin".
     *
     * @return false if either file cannot be opened or holds fewer elements
     *         than expected (previously a short read was silently ignored and
     *         garbage weights were reported as a successful load).
     */
    bool FromFile(const char *fileprefix)
    {
        std::stringstream ssf, ssbf;
        ssf << fileprefix << ".bin";
        ssbf << fileprefix << ".bias.bin";

        // Read weights file
        FILE *fp = fopen(ssf.str().c_str(), "rb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssf.str().c_str());
            return false;
        }
        size_t conv_count = (size_t)in_channels * out_channels * kernel_size * kernel_size;
        // Bug fix: check the element count returned by fread instead of
        // discarding it, so truncated files are detected.
        if (fread(&pconv[0], sizeof(float), conv_count, fp) != conv_count)
        {
            printf("ERROR: Cannot read file %s\n", ssf.str().c_str());
            fclose(fp);
            return false;
        }
        fclose(fp);

        // Read bias file
        fp = fopen(ssbf.str().c_str(), "rb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssbf.str().c_str());
            return false;
        }
        if (fread(&pbias[0], sizeof(float), (size_t)out_channels, fp) != (size_t)out_channels)
        {
            printf("ERROR: Cannot read file %s\n", ssbf.str().c_str());
            fclose(fp);
            return false;
        }
        fclose(fp);
        return true;
    }

    /**
     * Saves weights to "<fileprefix>.bin" and biases to
     * "<fileprefix>.bias.bin". Exits the process with code 2 on any I/O
     * failure (matching the original open-failure behavior; short writes are
     * now detected as well).
     */
    void ToFile(const char *fileprefix)
    {
        std::stringstream ssf, ssbf;
        ssf << fileprefix << ".bin";
        ssbf << fileprefix << ".bias.bin";

        // Write weights file
        FILE *fp = fopen(ssf.str().c_str(), "wb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssf.str().c_str());
            exit(2);
        }
        size_t conv_count = (size_t)in_channels * out_channels * kernel_size * kernel_size;
        if (fwrite(&pconv[0], sizeof(float), conv_count, fp) != conv_count)
        {
            printf("ERROR: Cannot write file %s\n", ssf.str().c_str());
            fclose(fp);
            exit(2);
        }
        fclose(fp);

        // Write bias file
        fp = fopen(ssbf.str().c_str(), "wb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssbf.str().c_str());
            exit(2);
        }
        if (fwrite(&pbias[0], sizeof(float), (size_t)out_channels, fp) != (size_t)out_channels)
        {
            printf("ERROR: Cannot write file %s\n", ssbf.str().c_str());
            fclose(fp);
            exit(2);
        }
        fclose(fp);
    }
};

/**
 * Represents a max-pooling layer (square window of side `size`, stride
 * `stride`). Pure configuration; pooling itself is done via cuDNN.
 */
struct MaxPoolLayer
{
    int size, stride;
    MaxPoolLayer(int size_, int stride_) : size(size_), stride(stride_) {}
};

/**
 * Represents a fully-connected neural network layer with bias.
 * pneurons holds the inputs x outputs weight matrix, pbias one value per
 * output. Host-side container only, like ConvBiasLayer.
 */
struct FullyConnectedLayer
{
    int inputs, outputs;

    std::vector<float> pneurons, pbias;

    FullyConnectedLayer(int inputs_, int outputs_) : outputs(outputs_), inputs(inputs_),
        pneurons(inputs_ * outputs_), pbias(outputs_) {}

    /**
     * Loads weights/biases from "<fileprefix>.bin" / "<fileprefix>.bias.bin".
     *
     * @return false on open failure or short read (previously the fread
     *         result was ignored).
     */
    bool FromFile(const char *fileprefix)
    {
        std::stringstream ssf, ssbf;
        ssf << fileprefix << ".bin";
        ssbf << fileprefix << ".bias.bin";

        // Read weights file
        FILE *fp = fopen(ssf.str().c_str(), "rb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssf.str().c_str());
            return false;
        }
        size_t weight_count = (size_t)inputs * outputs;
        if (fread(&pneurons[0], sizeof(float), weight_count, fp) != weight_count)
        {
            printf("ERROR: Cannot read file %s\n", ssf.str().c_str());
            fclose(fp);
            return false;
        }
        fclose(fp);

        // Read bias file
        fp = fopen(ssbf.str().c_str(), "rb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssbf.str().c_str());
            return false;
        }
        if (fread(&pbias[0], sizeof(float), (size_t)outputs, fp) != (size_t)outputs)
        {
            printf("ERROR: Cannot read file %s\n", ssbf.str().c_str());
            fclose(fp);
            return false;
        }
        fclose(fp);
        return true;
    }

    /**
     * Saves weights/biases; exits with code 2 on any I/O failure.
     */
    void ToFile(const char *fileprefix)
    {
        std::stringstream ssf, ssbf;
        ssf << fileprefix << ".bin";
        ssbf << fileprefix << ".bias.bin";

        // Write weights file
        FILE *fp = fopen(ssf.str().c_str(), "wb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssf.str().c_str());
            exit(2);
        }
        size_t weight_count = (size_t)inputs * outputs;
        if (fwrite(&pneurons[0], sizeof(float), weight_count, fp) != weight_count)
        {
            printf("ERROR: Cannot write file %s\n", ssf.str().c_str());
            fclose(fp);
            exit(2);
        }
        fclose(fp);

        // Write bias file
        fp = fopen(ssbf.str().c_str(), "wb");
        if (!fp)
        {
            printf("ERROR: Cannot open file %s\n", ssbf.str().c_str());
            exit(2);
        }
        if (fwrite(&pbias[0], sizeof(float), (size_t)outputs, fp) != (size_t)outputs)
        {
            printf("ERROR: Cannot write file %s\n", ssbf.str().c_str());
            fclose(fp);
            exit(2);
        }
        fclose(fp);
    }
};
/////////////////////////////////////////////////////////////////////////////////////////// // GPU Kernels /** * Fills a floating-point array with ones. * * @param vec The array to fill. * @param size The number of elements in the array. */ __global__ void FillOnes(float *vec, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; vec[idx] = 1.0f; } /** * Computes the backpropagation results of the Softmax loss for each result in a batch. * Uses the softmax values obtained from forward propagation to compute the difference. * * @param label The training batch label values. * @param num_labels The number of possible labels. * @param batch_size The size of the trained batch. * @param diff The resulting gradient. */ __global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = static_cast<int>(label[idx]); // For each item in the batch, decrease the result of the label's value by 1 diff[idx * num_labels + label_value] -= 1.0f; } /////////////////////////////////////////////////////////////////////////////////////////// // CUDNN/CUBLAS training context struct TrainingContext { cudnnHandle_t cudnnHandle; cublasHandle_t cublasHandle; cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor, pool1Tensor, //data conv2Tensor, conv2BiasTensor, conv3Tensor, conv3BiasTensor, pool2Tensor, fc1Tensor, fc2Tensor; cudnnFilterDescriptor_t conv1filterDesc, conv2filterDesc, conv3filterDesc; //kernel cudnnConvolutionDescriptor_t conv1Desc, conv2Desc, conv3Desc; cudnnConvolutionFwdAlgo_t conv1algo, conv2algo, conv3algo; cudnnConvolutionBwdFilterAlgo_t conv1bwfalgo, conv2bwfalgo, conv3bwfalgo; cudnnConvolutionBwdDataAlgo_t conv2bwdalgo, conv3bwdalgo; cudnnPoolingDescriptor_t poolDesc; cudnnActivationDescriptor_t fc1Activation; int m_gpuid; int m_batchSize; size_t m_workspaceSize; FullyConnectedLayer& 
ref_fc1, &ref_fc2; // Disable copying TrainingContext& operator=(const TrainingContext&) = delete; TrainingContext(const TrainingContext&) = delete; TrainingContext(int gpuid, int batch_size, ConvBiasLayer& conv1, MaxPoolLayer& pool1, ConvBiasLayer& conv2, MaxPoolLayer& pool2, ConvBiasLayer& conv3, FullyConnectedLayer& fc1, FullyConnectedLayer& fc2) : ref_fc1(fc1), ref_fc2(fc2), m_gpuid(gpuid) { m_batchSize = batch_size; // Create CUBLAS and CUDNN handles checkCudaErrors(cudaSetDevice(gpuid)); checkCudaErrors(cublasCreate(&cublasHandle)); checkCUDNN(cudnnCreate(&cudnnHandle)); // Create tensor descriptors checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv3Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv3BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc2Tensor)); checkCUDNN(cudnnCreateActivationDescriptor(&fc1Activation)); checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc)); checkCUDNN(cudnnCreateFilterDescriptor(&conv2filterDesc)); checkCUDNN(cudnnCreateFilterDescriptor(&conv3filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv2Desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv3Desc)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); // Set tensor descriptor sizes checkCUDNN(cudnnSetTensor4dDescriptor(conv1BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv1.out_channels, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(conv2BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv2.out_channels, 1, 1)); 
checkCUDNN(cudnnSetTensor4dDescriptor(conv3BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv3.out_channels, 1, 1)); checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, pool1.size, pool1.size, 0, 0, pool1.stride, pool1.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(pool2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, conv2.out_channels, conv2.out_height / pool2.stride, conv2.out_width / pool2.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(fc1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc1.outputs, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(fc2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc2.outputs, 1, 1)); checkCUDNN(cudnnSetActivationDescriptor(fc1Activation, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); // Set convolution tensor sizes and compute workspace size size_t workspace = 0; workspace = std::max(workspace, SetFwdConvolutionTensors(conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo)); workspace = std::max(workspace, SetBwdConvolutionTensors(dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, &conv1bwfalgo, nullptr)); workspace = std::max(workspace, SetFwdConvolutionTensors(conv2, pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, conv2algo)); workspace = std::max(workspace, SetBwdConvolutionTensors(pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, &conv2bwfalgo, &conv2bwdalgo)); workspace = std::max(workspace, SetFwdConvolutionTensors(conv3, pool2Tensor, conv3Tensor, conv3filterDesc, conv3Desc, conv3algo)); workspace = std::max(workspace, SetBwdConvolutionTensors(pool2Tensor, conv3Tensor, conv3filterDesc, conv3Desc, &conv3bwfalgo, &conv3bwdalgo)); // The workspace is allocated later (if necessary) m_workspaceSize = workspace; } ~TrainingContext() { checkCudaErrors(cudaSetDevice(m_gpuid)); checkCudaErrors(cublasDestroy(cublasHandle)); checkCUDNN(cudnnDestroy(cudnnHandle)); checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor)); 
checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv1BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv3Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv3BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc2Tensor)); checkCUDNN(cudnnDestroyActivationDescriptor(fc1Activation)); checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc)); checkCUDNN(cudnnDestroyFilterDescriptor(conv2filterDesc)); checkCUDNN(cudnnDestroyFilterDescriptor(conv3filterDesc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv2Desc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv3Desc)); checkCUDNN(cudnnDestroyPoolingDescriptor(poolDesc)); } size_t SetFwdConvolutionTensors(ConvBiasLayer& conv, cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionFwdAlgo_t& algo) { size_t sizeInBytes = 0; int n = m_batchSize; int c = conv.in_channels; int h = conv.in_height; int w = conv.in_width; checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, conv.out_channels, conv.in_channels, conv.kernel_size, conv.kernel_size)); #if CUDNN_MAJOR > 5 checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); #else checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); #endif // Find dimension of convolution output checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc, 
srcTensorDesc, filterDesc, &n, &c, &h, &w)); checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, algo, &sizeInBytes)); return sizeInBytes; } void ForwardPropagation(float *data, float *conv1, float *pool1, float *conv2, float *pool2, float *conv3, float *fc1, float *fc1relu, float *fc2, float *result, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pconv3, float *pconv3bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; checkCudaErrors(cudaSetDevice(m_gpuid)); // Conv1 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data, conv1filterDesc, pconv1, conv1Desc, conv1algo, workspace, m_workspaceSize, &beta, conv1Tensor, conv1)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1BiasTensor, pconv1bias, &alpha, conv1Tensor, conv1)); // Pool1 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv1Tensor, conv1, &beta, pool1Tensor, pool1)); // Conv2 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool1Tensor, pool1, conv2filterDesc, pconv2, conv2Desc, conv2algo, workspace, m_workspaceSize, &beta, conv2Tensor, conv2)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv2BiasTensor, pconv2bias, &alpha, conv2Tensor, conv2)); // Pool2 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv2Tensor, conv2, &beta, pool2Tensor, pool2)); // Conv3 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool2Tensor, pool2, conv3filterDesc, pconv3, conv3Desc, conv3algo, workspace, m_workspaceSize, &beta, conv3Tensor, conv3)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, 
conv3BiasTensor, pconv3bias, &alpha, conv3Tensor, conv3)); // FC1 layer // Forward propagate neurons using weights (fc1 = pfc1'*conv3) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, ref_fc1.inputs, &alpha, pfc1, ref_fc1.inputs, /*pool2*/conv3, ref_fc1.inputs, &beta, fc1, ref_fc1.outputs)); // Add bias using GEMM's "beta" (fc1 += pfc1bias*1_vec') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, 1, &alpha, pfc1bias, ref_fc1.outputs, onevec, 1, &alpha, fc1, ref_fc1.outputs)); // ReLU activation checkCUDNN(cudnnActivationForward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1, &beta, fc1Tensor, fc1relu)); // FC2 layer // Forward propagate neurons using weights (fc2 = pfc2'*fc1relu) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, ref_fc2.inputs, &alpha, pfc2, ref_fc2.inputs, fc1relu, ref_fc2.inputs, &beta, fc2, ref_fc2.outputs)); // Add bias using GEMM's "beta" (fc2 += pfc2bias*1_vec') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, 1, &alpha, pfc2bias, ref_fc2.outputs, onevec, 1, &alpha, fc2, ref_fc2.outputs)); // Softmax loss checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, fc2Tensor, fc2, &beta, fc2Tensor, result)); } size_t SetBwdConvolutionTensors(cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionBwdFilterAlgo_t *falgo, cudnnConvolutionBwdDataAlgo_t *dalgo) { size_t sizeInBytes = 0, tmpsize = 0; // If backprop filter algorithm was requested if (falgo) { checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, falgo)); checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize( 
cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, *falgo, &tmpsize)); sizeInBytes = std::max(sizeInBytes, tmpsize); } // If backprop data algorithm was requested if (dalgo) { checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, dalgo)); checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, *dalgo, &tmpsize)); sizeInBytes = std::max(sizeInBytes, tmpsize); } return sizeInBytes; } void Backpropagation(ConvBiasLayer& layer_conv1, MaxPoolLayer& layer_pool1, ConvBiasLayer& layer_conv2, MaxPoolLayer& layer_pool2,ConvBiasLayer& layer_conv3, float *data, float *labels, float *conv1, float *pool1, float *conv2, float *pool2, float *conv3, float *fc1, float *fc1relu, float *fc2, float *fc2smax, float *dloss_data, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pconv3, float *pconv3bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *dpool1, float *gconv2, float *gconv2bias, float *dconv2, float *dpool2, float *gconv3, float *gconv3bias, float *dconv3, float *gfc1, float *gfc1bias, float *dfc1, float *dfc1relu, float *gfc2, float *gfc2bias, float *dfc2, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; float scalVal = 1.0f / static_cast<float>(m_batchSize); checkCudaErrors(cudaSetDevice(m_gpuid)); // Initialization (using the training error function) checkCudaErrors(cudaMemcpyAsync(dloss_data, fc2smax, sizeof(float) * m_batchSize * ref_fc2.outputs, cudaMemcpyDeviceToDevice)); // Softmax layer SoftmaxLossBackprop<<<RoundUp(m_batchSize, BW), BW>>>(labels, ref_fc2.outputs, m_batchSize, dloss_data); // Accounting for batch size in SGD checkCudaErrors(cublasSscal(cublasHandle, ref_fc2.outputs * m_batchSize, &scalVal, dloss_data, 1)); // FC2 layer // Compute derivative with respect to weights: 
gfc2 = (fc1relu * dfc2smax') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ref_fc2.inputs, ref_fc2.outputs, m_batchSize, &alpha, fc1relu, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, gfc2, ref_fc2.inputs)); // Compute derivative with respect to bias: gfc2bias = dfc2smax * 1_vec checkCudaErrors(cublasSgemv(cublasHandle, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, &alpha, dloss_data, ref_fc2.outputs, onevec, 1, &beta, gfc2bias, 1)); // Compute derivative with respect to data (for previous layer): pfc2*dfc2smax (500x10*10xN) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc2.inputs, m_batchSize, ref_fc2.outputs, &alpha, pfc2, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, dfc2, ref_fc2.inputs)); // ReLU activation checkCUDNN(cudnnActivationBackward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1relu, fc1Tensor, dfc2, fc1Tensor, fc1, &beta, fc1Tensor, dfc1relu)); // FC1 layer // Compute derivative with respect to weights: gfc1 = (pool2 * dfc1relu') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ref_fc1.inputs, ref_fc1.outputs, m_batchSize, &alpha, pool2, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, gfc1, ref_fc1.inputs)); // Compute derivative with respect to bias: gfc1bias = dfc1relu * 1_vec checkCudaErrors(cublasSgemv(cublasHandle, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, &alpha, dfc1relu, ref_fc1.outputs, onevec, 1, &beta, gfc1bias, 1)); // Compute derivative with respect to data (for previous layer): pfc1*dfc1relu (800x500*500xN) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc1.inputs, m_batchSize, ref_fc1.outputs, &alpha, pfc1, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, dfc1, ref_fc1.inputs)); // Conv3 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv3Tensor, dfc1, &beta, conv3BiasTensor, gconv3bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool2Tensor, pool2, conv3Tensor, dfc1, conv3Desc, 
conv3bwfalgo, workspace, m_workspaceSize, &beta, conv3filterDesc, gconv3)); checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv3filterDesc, pconv3, conv3Tensor, dfc1, conv3Desc, conv3bwdalgo, workspace, m_workspaceSize, &beta, pool2Tensor, dconv3)); // Pool2 layer checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool2Tensor, pool2, pool2Tensor, /*dfc1*/dconv3, conv2Tensor, conv2, &beta, conv2Tensor, dpool2)); // Conv2 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv2Tensor, dpool2, &beta, conv2BiasTensor, gconv2bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool1Tensor, pool1, conv2Tensor, dpool2, conv2Desc, conv2bwfalgo, workspace, m_workspaceSize, &beta, conv2filterDesc, gconv2)); checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv2filterDesc, pconv2, conv2Tensor, dpool2, conv2Desc, conv2bwdalgo, workspace, m_workspaceSize, &beta, pool1Tensor, dconv2)); // Pool1 layer checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool1Tensor, pool1, pool1Tensor, dconv2, conv1Tensor, conv1, &beta, conv1Tensor, dpool1)); // Conv1 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1Tensor, dpool1, &beta, conv1BiasTensor, gconv1bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, dataTensor, data, conv1Tensor, dpool1, conv1Desc, conv1bwfalgo, workspace, m_workspaceSize, &beta, conv1filterDesc, gconv1)); // No need for convBackwardData because there are no more layers below } void UpdateWeights(float learning_rate, ConvBiasLayer& conv1, ConvBiasLayer& conv2, ConvBiasLayer &conv3, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pconv3, float *pconv3bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *gconv2, float *gconv2bias, float *gconv3, float *gconv3bias, float *gfc1, float *gfc1bias, float *gfc2, float *gfc2bias) { float alpha = -learning_rate; 
checkCudaErrors(cudaSetDevice(m_gpuid)); // Conv1 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv1.pconv.size()), &alpha, gconv1, 1, pconv1, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv1.pbias.size()), &alpha, gconv1bias, 1, pconv1bias, 1)); // Conv2 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv2.pconv.size()), &alpha, gconv2, 1, pconv2, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv2.pbias.size()), &alpha, gconv2bias, 1, pconv2bias, 1)); // Conv3 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv3.pconv.size()), &alpha, gconv3, 1, pconv3, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv3.pbias.size()), &alpha, gconv3bias, 1, pconv3bias, 1)); // Fully connected 1 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pneurons.size()), &alpha, gfc1, 1, pfc1, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pbias.size()), &alpha, gfc1bias, 1, pfc1bias, 1)); // Fully connected 2 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pneurons.size()), &alpha, gfc2, 1, pfc2, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pbias.size()), &alpha, gfc2bias, 1, pfc2bias, 1)); } }; /////////////////////////////////////////////////////////////////////////////////////////// // Main function int main(int argc, char **argv) { #ifdef USE_GFLAGS gflags::ParseCommandLineFlags(&argc, &argv, true); #endif size_t width, height, channels = 1; // Open input data printf("Reading input data\n"); // Read dataset sizes size_t train_size = ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height); size_t test_size = ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height); if (train_size == 0) return 1; std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size); 
std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size); // Read data from datasets if (ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size) return 2; if (ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size) return 3; printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size); printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations); // This code snippet saves a random image and its label /* std::random_device rd_image; int random_image = rd_image() % train_size; std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm"; SavePGMFile(&train_images[0] + random_image * width*height*channels, width, height, ss.str().c_str()); */ // Choose GPU int num_gpus; checkCudaErrors(cudaGetDeviceCount(&num_gpus)); if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus) { printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", FLAGS_gpu, num_gpus); return 4; } // Create the LeNet network architecture ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height); MaxPoolLayer pool1(2, 2); ConvBiasLayer conv2(conv1.out_channels, 30, 3, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride); MaxPoolLayer pool2(2, 2); ConvBiasLayer conv3(conv2.out_channels, 70, 3, conv2.out_width / pool1.stride , conv2.out_height/pool2.stride ); FullyConnectedLayer fc1((conv3.out_channels*conv3.out_width*conv3.out_height) , 500); //FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) , 500); FullyConnectedLayer fc2(fc1.outputs, 10); // Initialize CUDNN/CUBLAS training context TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, conv3, fc1, fc2); // Determine initial network structure bool bRet = true; if (FLAGS_pretrained) { bRet = 
conv1.FromFile("conv1"); bRet &= conv2.FromFile("conv2"); bRet &= fc1.FromFile("ip1"); bRet &= fc2.FromFile("ip2"); } if (!bRet || !FLAGS_pretrained) { // Create random network std::random_device rd; std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // Xavier weight filling float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels)); std::uniform_real_distribution<> dconv1(-wconv1, wconv1); float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels)); std::uniform_real_distribution<> dconv2(-wconv2, wconv2); float wconv3 = sqrt(3.0f / (conv3.kernel_size * conv3.kernel_size * conv3.in_channels)); std::uniform_real_distribution<> dconv3(-wconv3, wconv3); float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs)); std::uniform_real_distribution<> dfc1(-wfc1, wfc1); float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs)); std::uniform_real_distribution<> dfc2(-wfc2, wfc2); // Randomize network for (auto&& iter : conv1.pconv) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv1.pbias) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv2.pconv) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv2.pbias) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv3.pconv) iter = static_cast<float>(dconv3(gen)); for (auto&& iter : conv3.pbias) iter = static_cast<float>(dconv3(gen)); for (auto&& iter : fc1.pneurons) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc1.pbias) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc2.pneurons) iter = static_cast<float>(dfc2(gen)); for (auto&& iter : fc2.pbias) iter = static_cast<float>(dfc2(gen)); } ///////////////////////////////////////////////////////////////////////////// // Create GPU data structures // Forward propagation data float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, *d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax; float *d_conv3; // Buffer | Element | N | C | H | W 
//----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(cudaMalloc(&d_data, sizeof(float) * context.m_batchSize * channels * height * width)); checkCudaErrors(cudaMalloc(&d_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1)); checkCudaErrors(cudaMalloc(&d_conv1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(cudaMalloc(&d_pool1, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(cudaMalloc(&d_conv2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(cudaMalloc(&d_pool2, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride))); checkCudaErrors(cudaMalloc(&d_conv3, sizeof(float) * context.m_batchSize * conv3.out_channels * conv3.out_height * conv3.out_width)); checkCudaErrors(cudaMalloc(&d_fc1, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_fc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_fc2, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(cudaMalloc(&d_fc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); // Network parameters float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias, *d_pconv3, *d_pconv3bias; float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias; checkCudaErrors(cudaMalloc(&d_pconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(cudaMalloc(&d_pconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(cudaMalloc(&d_pconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(cudaMalloc(&d_pconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(cudaMalloc(&d_pconv3, sizeof(float) * conv3.pconv.size())); 
checkCudaErrors(cudaMalloc(&d_pconv3bias, sizeof(float) * conv3.pbias.size())); checkCudaErrors(cudaMalloc(&d_pfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(cudaMalloc(&d_pfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(cudaMalloc(&d_pfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(cudaMalloc(&d_pfc2bias, sizeof(float) * fc2.pbias.size())); // Network parameter gradients float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias, *d_gconv3, *d_gconv3bias; float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias; checkCudaErrors(cudaMalloc(&d_gconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(cudaMalloc(&d_gconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(cudaMalloc(&d_gconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(cudaMalloc(&d_gconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(cudaMalloc(&d_gconv3, sizeof(float) * conv3.pconv.size())); checkCudaErrors(cudaMalloc(&d_gconv3bias, sizeof(float) * conv3.pbias.size())); checkCudaErrors(cudaMalloc(&d_gfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(cudaMalloc(&d_gfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(cudaMalloc(&d_gfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(cudaMalloc(&d_gfc2bias, sizeof(float) * fc2.pbias.size())); // Differentials w.r.t. 
data float *d_dpool1, *d_dpool2, *d_dconv2, *d_dconv3, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(cudaMalloc(&d_dpool1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(cudaMalloc(&d_dpool2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(cudaMalloc(&d_dconv2, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(cudaMalloc(&d_dconv3, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(cudaMalloc(&d_dfc1, sizeof(float) * context.m_batchSize * fc1.inputs)); checkCudaErrors(cudaMalloc(&d_dfc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_dfc2, sizeof(float) * context.m_batchSize * fc2.inputs)); checkCudaErrors(cudaMalloc(&d_dfc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(cudaMalloc(&d_dlossdata,sizeof(float) * context.m_batchSize * fc2.outputs)); // Temporary buffers and workspaces float *d_onevec; void *d_cudnn_workspace = nullptr; checkCudaErrors(cudaMalloc(&d_onevec, sizeof(float)* context.m_batchSize)); if (context.m_workspaceSize > 0) checkCudaErrors(cudaMalloc(&d_cudnn_workspace, context.m_workspaceSize)); ///////////////////////////////////////////////////////////////////////////// // Copy initial network to device checkCudaErrors(cudaMemcpyAsync(d_pconv1, &conv1.pconv[0], sizeof(float) * conv1.pconv.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv1bias, &conv1.pbias[0], sizeof(float) * conv1.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv2, 
&conv2.pconv[0], sizeof(float) * conv2.pconv.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv2bias, &conv2.pbias[0], sizeof(float) * conv2.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv3, &conv3.pconv[0], sizeof(float) * conv3.pconv.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv3bias, &conv3.pbias[0], sizeof(float) * conv3.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc1, &fc1.pneurons[0], sizeof(float) * fc1.pneurons.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc1bias, &fc1.pbias[0], sizeof(float) * fc1.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc2, &fc2.pneurons[0], sizeof(float) * fc2.pneurons.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc2bias, &fc2.pbias[0], sizeof(float) * fc2.pbias.size(), cudaMemcpyHostToDevice)); // Fill one-vector with ones FillOnes<<<RoundUp(context.m_batchSize, BW), BW>>>(d_onevec, context.m_batchSize); printf("Preparing dataset\n"); // Normalize training set to be in [0,1] std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size); for (size_t i = 0; i < train_size * channels * width * height; ++i) train_images_float[i] = (float)train_images[i] / 255.0f; for (size_t i = 0; i < train_size; ++i) train_labels_float[i] = (float)train_labels[i]; printf("Training...\n"); // Use SGD to train the network checkCudaErrors(cudaDeviceSynchronize()); auto t1 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < FLAGS_iterations; ++iter) { // Train int imageid = iter % (train_size / context.m_batchSize); // Prepare current batch on device checkCudaErrors(cudaMemcpyAsync(d_data, &train_images_float[imageid * context.m_batchSize * width*height*channels], sizeof(float) * context.m_batchSize * channels * width * height, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_labels, 
&train_labels_float[imageid * context.m_batchSize], sizeof(float) * context.m_batchSize, cudaMemcpyHostToDevice)); // Forward propagation context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Backward propagation context.Backpropagation(conv1, pool1, conv2, pool2, conv3, d_data, d_labels, d_conv1, d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_dlossdata, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_dpool1, d_gconv2, d_gconv2bias, d_gconv3, d_gconv3bias, d_dconv2, d_dpool2, d_dconv3, d_gfc1, d_gfc1bias, d_dfc1, d_dfc1relu, d_gfc2, d_gfc2bias, d_dfc2, d_cudnn_workspace, d_onevec); // Compute learning rate float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power))); // Update weights context.UpdateWeights(learningRate, conv1, conv2, conv3, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_gconv2, d_gconv2bias, d_gconv3, d_gconv3bias, d_gfc1, d_gfc1bias, d_gfc2, d_gfc2bias); } checkCudaErrors(cudaDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations); if (FLAGS_save_data) { // Copy trained weights from GPU to CPU checkCudaErrors(cudaMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv1.pbias[0], d_pconv1bias, sizeof(float) * conv1.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv2.pconv[0], d_pconv2, sizeof(float) * conv2.pconv.size(), 
cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv2.pbias[0], d_pconv2bias, sizeof(float) * conv2.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv3.pconv[0], d_pconv3, sizeof(float) * conv3.pconv.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv3.pbias[0], d_pconv3bias, sizeof(float) * conv3.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc1.pneurons[0], d_pfc1, sizeof(float) * fc1.pneurons.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc1.pbias[0], d_pfc1bias, sizeof(float) * fc1.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc2.pneurons[0], d_pfc2, sizeof(float) * fc2.pneurons.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc2.pbias[0], d_pfc2bias, sizeof(float) * fc2.pbias.size(), cudaMemcpyDeviceToHost)); // Now save data printf("Saving data to file\n"); conv1.ToFile("conv1"); conv2.ToFile("conv2"); fc1.ToFile("ip1"); fc2.ToFile("ip2"); } float classification_error = 1.0f; int classifications = FLAGS_classify; if (classifications < 0) classifications = (int)test_size; // Test the resulting neural network's classification if (classifications > 0) { // Initialize a TrainingContext structure for testing (different batch size) TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, conv3, fc1, fc2); // Ensure correct workspaceSize is allocated for testing if (context.m_workspaceSize < test_context.m_workspaceSize) { checkCudaErrors(cudaFree(d_cudnn_workspace)); checkCudaErrors(cudaMalloc(&d_cudnn_workspace, test_context.m_workspaceSize)); } int num_errors = 0; for (int i = 0; i < classifications; ++i) { std::vector<float> data(width * height); // Normalize image to be in [0,1] for (int j = 0; j < width * height; ++j) data[j] = (float)test_images[i * width*height*channels + j] / 255.0f; checkCudaErrors(cudaMemcpyAsync(d_data, &data[0], sizeof(float) * width * height, cudaMemcpyHostToDevice)); // Forward propagate test image 
test_context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_conv3, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pconv3, d_pconv3bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Perform classification std::vector<float> class_vec(10); // Copy back result checkCudaErrors(cudaMemcpy(&class_vec[0], d_fc2smax, sizeof(float) * 10, cudaMemcpyDeviceToHost)); // Determine classification according to maximal response int chosen = 0; for (int id = 1; id < 10; ++id) { if (class_vec[chosen] < class_vec[id]) chosen = id; } if (chosen != test_labels[i]) ++num_errors; } classification_error = (float)num_errors / (float)classifications; printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications); } // Free data structures checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFree(d_conv1)); checkCudaErrors(cudaFree(d_pool1)); checkCudaErrors(cudaFree(d_conv2)); checkCudaErrors(cudaFree(d_pool2)); checkCudaErrors(cudaFree(d_conv3)); checkCudaErrors(cudaFree(d_fc1)); checkCudaErrors(cudaFree(d_fc2)); checkCudaErrors(cudaFree(d_pconv1)); checkCudaErrors(cudaFree(d_pconv1bias)); checkCudaErrors(cudaFree(d_pconv2)); checkCudaErrors(cudaFree(d_pconv2bias)); checkCudaErrors(cudaFree(d_pconv3)); checkCudaErrors(cudaFree(d_pconv3bias)); checkCudaErrors(cudaFree(d_pfc1)); checkCudaErrors(cudaFree(d_pfc1bias)); checkCudaErrors(cudaFree(d_pfc2)); checkCudaErrors(cudaFree(d_pfc2bias)); checkCudaErrors(cudaFree(d_gconv1)); checkCudaErrors(cudaFree(d_gconv1bias)); checkCudaErrors(cudaFree(d_gconv2)); checkCudaErrors(cudaFree(d_gconv2bias)); checkCudaErrors(cudaFree(d_gconv3)); checkCudaErrors(cudaFree(d_gconv3bias)); checkCudaErrors(cudaFree(d_gfc1)); checkCudaErrors(cudaFree(d_gfc1bias)); checkCudaErrors(cudaFree(d_dfc1)); checkCudaErrors(cudaFree(d_gfc2)); checkCudaErrors(cudaFree(d_gfc2bias)); checkCudaErrors(cudaFree(d_dfc2)); 
checkCudaErrors(cudaFree(d_dpool1)); checkCudaErrors(cudaFree(d_dconv2)); checkCudaErrors(cudaFree(d_dpool2)); checkCudaErrors(cudaFree(d_dconv3)); checkCudaErrors(cudaFree(d_labels)); checkCudaErrors(cudaFree(d_dlossdata)); checkCudaErrors(cudaFree(d_onevec)); if (d_cudnn_workspace != nullptr) checkCudaErrors(cudaFree(d_cudnn_workspace)); return 0; }
a36cefd361cd5105a41e4075b0c0227e255656d3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <mpi.h> #include "collectives/collectives.h" int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); cudaMPI_Init(MPI_COMM_WORLD); if (argc < 2) { fprintf(stderr, "give a value for N\n"); exit(EXIT_FAILURE); } const size_t N = atoi(argv[1]); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // find how many GPUs are connected here int deviceCount; CUDACHECK( hipGetDeviceCount(&deviceCount) ); printf("rank %d has %d cuda device(s)\n", rank, deviceCount); double **sendbuf = (double**)malloc(deviceCount * sizeof(double*)); double **recvbuf = (double**)malloc(deviceCount * sizeof(double*)); hipEvent_t *start = (hipEvent_t*)malloc(deviceCount * sizeof(hipEvent_t)); hipEvent_t *stop = (hipEvent_t*)malloc(deviceCount * sizeof(hipEvent_t)); hipStream_t *s = (hipStream_t*)malloc(deviceCount * sizeof(hipStream_t)); for (int device = 0; device < deviceCount; ++device) { CUDACHECK( hipSetDevice(device) ); CUDACHECK( hipEventCreate(start+device) ); CUDACHECK( hipEventCreate(stop+device) ); CUDACHECK( hipStreamCreate(s+device) ); CUDACHECK( hipMalloc(&sendbuf[device], N * sizeof(double)) ); CUDACHECK( hipMalloc(&recvbuf[device], N * sizeof(double)) ); CUDACHECK( hipMemset(sendbuf[device], 0, N * sizeof(double)) ); CUDACHECK( hipMemset(recvbuf[device], 0, N * sizeof(double)) ); } // allocate some host space for data filling/checking double *h_data = NULL; CUDACHECK( hipHostMalloc((void **)&h_data, N * sizeof(double)) ); if (rank == 0) { // initialize some data on rank 0 for (size_t i = 0; i < N; ++i) h_data[i] = (double)i; CUDACHECK( hipMemcpy(sendbuf[0], h_data, N * sizeof(double), hipMemcpyDefault) ); } for (int device = 0; device < deviceCount; ++device) { CUDACHECK( hipSetDevice(device) ); CUDACHECK( hipEventRecord(start[device]) ); } cudaMPI_Bcast(sendbuf[0], recvbuf, N, 0, MPI_COMM_WORLD, s); // 
stop timers and synchronize for (int device = 0; device < deviceCount; ++device) { CUDACHECK( hipSetDevice(device) ); CUDACHECK( hipEventRecord(stop[device]) ); CUDACHECK( hipDeviceSynchronize() ); } // make sure the broadcast was successful for (int device = 0; device < deviceCount; ++device) { // skip root GPU memset(h_data, 0, N * sizeof(double)); CUDACHECK( hipMemcpy(h_data, recvbuf[device], N * sizeof(double), hipMemcpyDefault) ); for (size_t i = 0; i < N; ++i) { if (h_data[i] != (double)i) { fprintf(stderr, "data mismatch: h_data[%d]=%lf\n", (int)i, h_data[i]); exit(EXIT_FAILURE); } } } for (int device = 0; device < deviceCount; ++device) { float time; CUDACHECK( hipEventElapsedTime(&time, start[device], stop[device]) ); printf("dev %d time %f ms\n", device, time); } for (int device = 0; device < deviceCount; ++device) { CUDACHECK( hipFree(sendbuf[device]) ); CUDACHECK( hipFree(recvbuf[device]) ); } CUDACHECK( hipHostFree(h_data) ); free(sendbuf); free(recvbuf); free(start); free(stop); free(s); cudaMPI_Finalize(); MPI_Finalize(); }
a36cefd361cd5105a41e4075b0c0227e255656d3.cu
#include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <mpi.h> #include "collectives/collectives.h" int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); cudaMPI_Init(MPI_COMM_WORLD); if (argc < 2) { fprintf(stderr, "give a value for N\n"); exit(EXIT_FAILURE); } const size_t N = atoi(argv[1]); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // find how many GPUs are connected here int deviceCount; CUDACHECK( cudaGetDeviceCount(&deviceCount) ); printf("rank %d has %d cuda device(s)\n", rank, deviceCount); double **sendbuf = (double**)malloc(deviceCount * sizeof(double*)); double **recvbuf = (double**)malloc(deviceCount * sizeof(double*)); cudaEvent_t *start = (cudaEvent_t*)malloc(deviceCount * sizeof(cudaEvent_t)); cudaEvent_t *stop = (cudaEvent_t*)malloc(deviceCount * sizeof(cudaEvent_t)); cudaStream_t *s = (cudaStream_t*)malloc(deviceCount * sizeof(cudaStream_t)); for (int device = 0; device < deviceCount; ++device) { CUDACHECK( cudaSetDevice(device) ); CUDACHECK( cudaEventCreate(start+device) ); CUDACHECK( cudaEventCreate(stop+device) ); CUDACHECK( cudaStreamCreate(s+device) ); CUDACHECK( cudaMalloc(&sendbuf[device], N * sizeof(double)) ); CUDACHECK( cudaMalloc(&recvbuf[device], N * sizeof(double)) ); CUDACHECK( cudaMemset(sendbuf[device], 0, N * sizeof(double)) ); CUDACHECK( cudaMemset(recvbuf[device], 0, N * sizeof(double)) ); } // allocate some host space for data filling/checking double *h_data = NULL; CUDACHECK( cudaMallocHost((void **)&h_data, N * sizeof(double)) ); if (rank == 0) { // initialize some data on rank 0 for (size_t i = 0; i < N; ++i) h_data[i] = (double)i; CUDACHECK( cudaMemcpy(sendbuf[0], h_data, N * sizeof(double), cudaMemcpyDefault) ); } for (int device = 0; device < deviceCount; ++device) { CUDACHECK( cudaSetDevice(device) ); CUDACHECK( cudaEventRecord(start[device]) ); } cudaMPI_Bcast(sendbuf[0], recvbuf, N, 0, MPI_COMM_WORLD, s); // stop timers and synchronize for (int 
device = 0; device < deviceCount; ++device) { CUDACHECK( cudaSetDevice(device) ); CUDACHECK( cudaEventRecord(stop[device]) ); CUDACHECK( cudaDeviceSynchronize() ); } // make sure the broadcast was successful for (int device = 0; device < deviceCount; ++device) { // skip root GPU memset(h_data, 0, N * sizeof(double)); CUDACHECK( cudaMemcpy(h_data, recvbuf[device], N * sizeof(double), cudaMemcpyDefault) ); for (size_t i = 0; i < N; ++i) { if (h_data[i] != (double)i) { fprintf(stderr, "data mismatch: h_data[%d]=%lf\n", (int)i, h_data[i]); exit(EXIT_FAILURE); } } } for (int device = 0; device < deviceCount; ++device) { float time; CUDACHECK( cudaEventElapsedTime(&time, start[device], stop[device]) ); printf("dev %d time %f ms\n", device, time); } for (int device = 0; device < deviceCount; ++device) { CUDACHECK( cudaFree(sendbuf[device]) ); CUDACHECK( cudaFree(recvbuf[device]) ); } CUDACHECK( cudaFreeHost(h_data) ); free(sendbuf); free(recvbuf); free(start); free(stop); free(s); cudaMPI_Finalize(); MPI_Finalize(); }
5b916c93542126d6ad76fb9db12a0953e9a5eff1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "THHudaTensor_kernel_copy.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); long *dst_sz = NULL; hipMalloc(&dst_sz, XSIZE*YSIZE); long *dst_st = NULL; hipMalloc(&dst_st, XSIZE*YSIZE); int dst_dim = 1; float *src = NULL; hipMalloc(&src, XSIZE*YSIZE); long *src_sz = NULL; hipMalloc(&src_sz, XSIZE*YSIZE); long *src_st = NULL; hipMalloc(&src_st, XSIZE*YSIZE); int src_dim = 1; long n_elem = 1; long innerdim = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( THCudaTensor_kernel_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( THCudaTensor_kernel_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, 
dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( THCudaTensor_kernel_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5b916c93542126d6ad76fb9db12a0953e9a5eff1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "THCudaTensor_kernel_copy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); long *dst_sz = NULL; cudaMalloc(&dst_sz, XSIZE*YSIZE); long *dst_st = NULL; cudaMalloc(&dst_st, XSIZE*YSIZE); int dst_dim = 1; float *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); long *src_sz = NULL; cudaMalloc(&src_sz, XSIZE*YSIZE); long *src_st = NULL; cudaMalloc(&src_st, XSIZE*YSIZE); int src_dim = 1; long n_elem = 1; long innerdim = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); THCudaTensor_kernel_copy<<<gridBlock,threadBlock>>>(dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { THCudaTensor_kernel_copy<<<gridBlock,threadBlock>>>(dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
THCudaTensor_kernel_copy<<<gridBlock,threadBlock>>>(dst,dst_sz,dst_st,dst_dim,src,src_sz,src_st,src_dim,n_elem,innerdim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4be5a9431ec3c01dc399ccce47b81325fb3811fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" #include <math.h> __global__ void test(int n, float *a, float*b, float *res) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<n) { res[i] = exp(-a[i]*b[i]); } }
4be5a9431ec3c01dc399ccce47b81325fb3811fd.cu
extern "C" #include <math.h> __global__ void test(int n, float *a, float*b, float *res) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<n) { res[i] = exp(-a[i]*b[i]); } }
a8d6a695f247a8661b252a6437502c8c91b92e8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstring> #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype, int num_axes> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_col); extern hipDeviceProp_t CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public ::testing::Test { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 10, 10)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; 
Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { Caffe::set_mode(Caffe::GPU); // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam, 2>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } 
// Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } int blobShape_im2col[5]={5, 500, 10, 10, 10}; template <typename Dtype> class Im2col3DKernelTest : public ::testing::Test { protected: Im2col3DKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(std::vector<int>(blobShape_im2col,blobShape_im2col+5))), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 3); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); shape_col_.resize(3); shape_ = blob_bottom_->shape(); pad_ = 0; stride_ = 2; kernel_size_ = 3; for (int i = 0; i < 3; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; shape_col_[i] = (shape_[i+2] + 2 * pad_ - kernel_size_) / stride_ + 1; } } virtual ~Im2col3DKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; std::vector<int> shape_; std::vector<int> shape_col_; int pad_; int stride_; int kernel_size_; }; TYPED_TEST_CASE(Im2col3DKernelTest, TestDtypes); TYPED_TEST(Im2col3DKernelTest, TestGPU3D) { Caffe::set_mode(Caffe::GPU); std::vector<int> blob_shape(5,0); blob_shape[0] = this->shape_[0]; blob_shape[1] = this->shape_[1] * this->kernel_size_ * 
this->kernel_size_ * this->kernel_size_; blob_shape[2] = this->shape_col_[0]; blob_shape[3] = this->shape_col_[1]; blob_shape[4] = this->shape_col_[2]; // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(blob_shape); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); std::vector<int> offset_vector(5,0); // CPU Version for (int n = 0; n < this->blob_bottom_->shape(0); ++n) { offset_vector[0] = n; im2col_cpu(bottom_data_cpu + this->blob_bottom_->offset(offset_vector), 3, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(offset_vector)); } // GPU version int num_kernels = this->shape_[1]; for(int i=0; i<3; i++) { num_kernels *= this->shape_col_[i]; } int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->shape(0); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) offset_vector[0] = n; hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam, 3>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data_gpu + this->blob_bottom_->offset(offset_vector), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), top_data_gpu + this->blob_top_->offset(offset_vector)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; 
TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
a8d6a695f247a8661b252a6437502c8c91b92e8d.cu
#include <cstring> #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/util/im2col.hpp" #include "caffe/vision_layers.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype, int num_axes> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public ::testing::Test { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 10, 10)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; 
int height_; int width_; int channels_; int pad_; int stride_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { Caffe::set_mode(Caffe::GPU); // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<TypeParam, 2><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; 
TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } int blobShape_im2col[5]={5, 500, 10, 10, 10}; template <typename Dtype> class Im2col3DKernelTest : public ::testing::Test { protected: Im2col3DKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(std::vector<int>(blobShape_im2col,blobShape_im2col+5))), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 3); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); shape_col_.resize(3); shape_ = blob_bottom_->shape(); pad_ = 0; stride_ = 2; kernel_size_ = 3; for (int i = 0; i < 3; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; shape_col_[i] = (shape_[i+2] + 2 * pad_ - kernel_size_) / stride_ + 1; } } virtual ~Im2col3DKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; std::vector<int> shape_; std::vector<int> shape_col_; int pad_; int stride_; int kernel_size_; }; TYPED_TEST_CASE(Im2col3DKernelTest, TestDtypes); TYPED_TEST(Im2col3DKernelTest, TestGPU3D) { Caffe::set_mode(Caffe::GPU); std::vector<int> blob_shape(5,0); blob_shape[0] = this->shape_[0]; blob_shape[1] = this->shape_[1] * this->kernel_size_ * this->kernel_size_ * this->kernel_size_; blob_shape[2] = this->shape_col_[0]; blob_shape[3] = this->shape_col_[1]; blob_shape[4] = 
this->shape_col_[2]; // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(blob_shape); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); std::vector<int> offset_vector(5,0); // CPU Version for (int n = 0; n < this->blob_bottom_->shape(0); ++n) { offset_vector[0] = n; im2col_cpu(bottom_data_cpu + this->blob_bottom_->offset(offset_vector), 3, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(offset_vector)); } // GPU version int num_kernels = this->shape_[1]; for(int i=0; i<3; i++) { num_kernels *= this->shape_col_[i]; } int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->shape(0); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) offset_vector[0] = n; im2col_gpu_kernel<TypeParam, 3><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data_gpu + this->blob_bottom_->offset(offset_vector), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), top_data_gpu + this->blob_top_->offset(offset_vector)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
5fb652fe141452d088f8c61101d216acda0901ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "legendre_rule.h" #include "matrixelements.h" #include <stdio.h> #include <vector> __device__ double me; __device__ double m2; __device__ double Gf; __device__ double g; __device__ int orderE1; __device__ int orderE2; __device__ int orderct1; __device__ int orderct2; __device__ int orderct3; __device__ int orderp2; __device__ int orderp3; __device__ double m; struct Settings { double me = 0.000511; double m2 = 0.01; double Gf = 0.0000116637; double g = 1.0; int orderE1 = 32; int orderE2 = 32; int orderct1 = 32; int orderct2 = 32; int orderct3 = 32; int orderp2 = 32; int orderp3 = 32; double m = 0.105660; int done = 0; string filename = " "; bool operator==(const Settings &a) const { return (m2 == a.m2 && done == a.done && orderE1 == a.orderE1 && orderE2 == a.orderE2 && orderct1 == a.orderct1 && orderct2 == a.orderct2 && orderct3 == a.orderct3 && orderp2 == a.orderp2 && orderp3 == a.orderp3); } } sett; // Calculate derivative appearing in the phasespace integral __device__ double Deriv(double E1, double E2, double ct1, double ct2, double ct3, double ph2, double ph3) { return 2 * (E1 + E2 - m - (ct2 * ct3 + cos(ph2 - ph3) * pow(1 - pow(ct2, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5)) * pow(pow(E2, 2) - pow(m2, 2), 0.5) - pow(pow(E1, 2) - pow(me, 2), 0.5) * (ct1 * ct3 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5) * sin(ph3))); } // Solve kinetic equation for E3 __device__ double CalcE3(double E1, double E2, double ct1, double ct2, double ct3, double ph2, double ph3) { return -(pow(E1 + E2 - m - (ct2 * ct3 + cos(ph2 - ph3) * pow(1 - pow(ct2, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5)) * pow(pow(E2, 2) - pow(m2, 2), 0.5) - pow(pow(E1, 2) - pow(me, 2), 0.5) * (ct1 * ct3 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5) * sin(ph3)), -1) * (2 * E1 * E2 - 2 * E1 * m - 2 * E2 * m + pow(m, 2) + pow(m2, 2) + pow(me, 2) - 2 * pow(pow(E2, 2) - pow(m2, 2), 0.5) * pow(pow(E1, 2) - 
pow(me, 2), 0.5) * (ct1 * ct2 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct2, 2), 0.5) * sin(ph2)))) / 2.; } // Implements a test for physicality, thus avoiding complicated region __device__ int isPhysical(double E1, double E2, double E3) { if (E1 < me || E2 < 0 || E3 < 0 || E1 + E2 + E3 > m) return 0; return 1; } // Calculate the integrals on the GPU __global__ void CalcRes(double *d_wE1, double *d_xE1, double *d_wE2, double *d_xE2, double *d_wct1, double *d_xct1, double *d_wct2, double *d_xct2, double *d_wct3, double *d_xct3, double *d_wp2, double *d_xp2, double *d_wp3, double *d_xp3, double *d_res) { int i = blockIdx.x * blockDim.x + threadIdx.x; double weight = 0.0; d_res[i] = 0.0; double E1, E2, E3, ct1, ct2, ct3, ph2, ph3; if (i < orderE1 * orderE2 * orderct1) { int iE1 = i % orderE1; int iCt1 = (i % (orderct1 * orderE1)) / orderE1; int iE2 = i / (orderE1 * orderct1); for (int iCt2 = 0; iCt2 < orderct2; iCt2++) { for (int iCt3 = 0; iCt3 < orderct3; iCt3++) { for (int ip2 = 0; ip2 < orderp2; ip2++) { for (int ip3 = 0; ip3 < orderp3; ip3++) { E1 = d_xE1[iE1]; E2 = d_xE2[iE2]; ct1 = d_xct1[iCt1]; ct2 = d_xct2[iCt2]; ct3 = d_xct3[iCt3]; ph2 = d_xp2[ip2]; ph3 = d_xp3[ip3]; E3 = CalcE3(E1, E2, ct1, ct2, ct3, ph2, ph3); if (isPhysical(E1, E2, E3)) { weight = pow(2.0 * M_PI, -7) / 8.0 * sqrt(E1 * E1 - me * me) * sqrt(E2 * E2 - m2 * m2) * E3 / abs(Deriv(E1, E2, ct1, ct2, ct3, ph2, ph3)) * d_wct2[iCt2] * d_wct3[iCt3] * d_wp2[ip2] * d_wp3[ip3]; d_res[i] += weight * MatrixElements::M2ScalarToLepton( E1, E2, E3, ct1, ct2, ct3, ph2, ph3); } } } } } } } // Carry out all integrals on the GPU, pull back results void IntegrateOnGPU(const char *Filename) { // Allocate memory for the quadrature nodes and results double *d_wE1, *d_xE1, *d_wE2, *d_xE2, *d_wct1, *d_xct1, *d_wct2, *d_xct2, *d_wct3, *d_xct3, *d_wp2, *d_xp2, *d_wp3, *d_xp3, *d_res; double *wE1 = (double *)malloc(sett.orderE1 * sizeof(double)); double *xE1 = (double *)malloc(sett.orderE1 * sizeof(double)); double *wE2 = 
(double *)malloc(sett.orderE2 * sizeof(double)); double *xE2 = (double *)malloc(sett.orderE2 * sizeof(double)); double *wct1 = (double *)malloc(sett.orderct1 * sizeof(double)); double *xct1 = (double *)malloc(sett.orderct1 * sizeof(double)); double *wct2 = (double *)malloc(sett.orderct2 * sizeof(double)); double *xct2 = (double *)malloc(sett.orderct2 * sizeof(double)); double *wct3 = (double *)malloc(sett.orderct3 * sizeof(double)); double *xct3 = (double *)malloc(sett.orderct3 * sizeof(double)); double *wp2 = (double *)malloc(sett.orderp2 * sizeof(double)); double *xp2 = (double *)malloc(sett.orderp2 * sizeof(double)); double *wp3 = (double *)malloc(sett.orderp3 * sizeof(double)); double *xp3 = (double *)malloc(sett.orderp3 * sizeof(double)); double *resVec = (double *)malloc(sett.orderE1 * sett.orderE2 * sett.orderct1 * sizeof(double)); // Allocate memory on the GPU for the nodes and results hipMalloc(&d_wE1, sett.orderE1 * sizeof(double)); hipMalloc(&d_xE1, sett.orderE1 * sizeof(double)); hipMalloc(&d_wE2, sett.orderE2 * sizeof(double)); hipMalloc(&d_xE2, sett.orderE2 * sizeof(double)); hipMalloc(&d_wct1, sett.orderct1 * sizeof(double)); hipMalloc(&d_xct1, sett.orderct1 * sizeof(double)); hipMalloc(&d_wct2, sett.orderct2 * sizeof(double)); hipMalloc(&d_xct2, sett.orderct2 * sizeof(double)); hipMalloc(&d_xct3, sett.orderct3 * sizeof(double)); hipMalloc(&d_wct3, sett.orderct3 * sizeof(double)); hipMalloc(&d_wp2, sett.orderp2 * sizeof(double)); hipMalloc(&d_xp2, sett.orderp2 * sizeof(double)); hipMalloc(&d_wp3, sett.orderp3 * sizeof(double)); hipMalloc(&d_xp3, sett.orderp3 * sizeof(double)); hipMalloc(&d_res, sett.orderE1 * sett.orderE2 * sett.orderct1 * sizeof(double)); // Calculate quadrature nodes cgqf(sett.orderct1, 1, 0, 0, -1.0, 1.0, xct1, wct1); cgqf(sett.orderct2, 1, 0, 0, -1.0, 1.0, xct2, wct2); cgqf(sett.orderct3, 1, 0, 0, -1.0, 1.0, xct3, wct3); cgqf(sett.orderp2, 1, 0, 0, 0.0, 2.0 * M_PI, xp2, wp2); cgqf(sett.orderp3, 1, 0, 0, 0.0, 2.0 * M_PI, xp3, 
wp3); cgqf(sett.orderE1, 1, 0, 0, sett.me, sett.m / 2, xE1, wE1); cgqf(sett.orderE2, 1, 0, 0, sett.m2, (sett.m + sett.m2) / 2.0, xE2, wE2); // Transfere to GPU hipMemcpy(d_wE1, wE1, sett.orderE1 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xE1, xE1, sett.orderE1 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wE2, wE2, sett.orderE2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xE2, xE2, sett.orderE2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wct1, wct1, sett.orderct1 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xct1, xct1, sett.orderct1 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wct2, wct2, sett.orderct2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xct2, xct2, sett.orderct2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wct3, wct3, sett.orderct3 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xct3, xct3, sett.orderct3 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wp2, wp2, sett.orderp2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xp2, xp2, sett.orderp2 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_wp3, wp3, sett.orderp3 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_xp3, xp3, sett.orderp3 * sizeof(double), hipMemcpyHostToDevice); // Break work into chunks and solve each on the GPU int N = sett.orderE1 * sett.orderE2 * sett.orderct1; hipLaunchKernelGGL(( CalcRes), dim3((N + 511) / 512), dim3(512), 0, 0, d_wE1, d_xE1, d_wE2, d_xE2, d_wct1, d_xct1, d_wct2, d_xct2, d_wct3, d_xct3, d_wp2, d_xp2, d_wp3, d_xp3, d_res); // Pull back results from the GPU hipMemcpy(resVec, d_res, N * sizeof(double), hipMemcpyDeviceToHost); // Calculate the total decay width to crosscheck with montecarlo estimates double res = 0.0; for (int iE1 = 0; iE1 < sett.orderE1; iE1++) { for (int iCt1 = 0; iCt1 < sett.orderct1; iCt1++) { for (int iE2 = 0; iE2 < sett.orderE2; iE2++) { res += resVec[iE1 + iE2 * sett.orderE1 * sett.orderct1 + iCt1 * sett.orderE1] * wE1[iE1] * wE2[iE2] * wct1[iCt1] / 
(2.0 * sett.m); } } } // Write the complete spectrum to a file FILE *pFile; pFile = fopen(Filename, "w"); fprintf(pFile, "###### %.12E %f %f %i %i %i " "%i %i %i %i\n", res, sett.m2, sett.m, sett.orderE1, sett.orderct1, sett.orderE2, sett.orderct2, sett.orderct3, sett.orderp2, sett.orderp3); for (int iE1 = 0; iE1 < sett.orderE1; iE1++) { for (int iCt1 = 0; iCt1 < sett.orderct1; iCt1++) { double buff = 0.0; for (int iE2 = 0; iE2 < sett.orderE2; iE2++) { buff += resVec[iE1 + iE2 * sett.orderE1 * sett.orderct1 + iCt1 * sett.orderE1] * wE1[iE1] * wE2[iE2] * wct1[iCt1]; } fprintf(pFile, "%.12E,%.12E,%.12E\n", xE1[iE1] * 2.0 / sett.m, xct1[iCt1], buff); } } fclose(pFile); printf("Total Width : %E\n Created File : %s", res, Filename); // Free allocated memory free(wE1); free(xE1); free(wE2); free(xE2); free(wct1); free(xct1); free(wct2); free(xct2); free(wct3); free(xct3); free(wp2); free(xp2); free(wp3); free(xp3); hipFree(d_wE1); hipFree(d_xE1); hipFree(d_wE2); hipFree(d_xE2); hipFree(d_wct1); hipFree(d_xct1); hipFree(d_wct2); hipFree(d_xct2); hipFree(d_wct3); hipFree(d_xct3); hipFree(d_wp2); hipFree(d_xp2); hipFree(d_wp3); hipFree(d_xp3); } // Estimate the runtime of the given choice of number of nodes void EstimateRuntime() { double i = (5580000.0 * sett.orderE1 * sett.orderE2 * sett.orderct1 * sett.orderct2 * sett.orderct3 * sett.orderp2 * sett.orderp3) / 34359738368; printf("%ih%im%is%ims\n", (int)(i / (60 * 60 * 1000)), (int)(i / (1000 * 60)) % 60, (int)(i / 1000) % 60, ((int)i) % 1000); } // Initialise fixed parameters on the device void InitParameters() { hipMemcpyToSymbol(m, &sett.m, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(me, &sett.me, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(m2, &sett.m2, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Gf, &sett.Gf, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(g, &sett.g, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderE1, &sett.orderE1, 
sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderE2, &sett.orderE2, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderct1, &sett.orderct1, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderct2, &sett.orderct2, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderct3, &sett.orderct3, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderp2, &sett.orderp2, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(orderp3, &sett.orderp3, sizeof(int), 0, hipMemcpyHostToDevice); } // Read space-seperated field of a line const char *getfield(char *line, int num) { const char *tok; for (tok = strtok(strdup(line), " "); tok && *tok; tok = strtok(NULL, " \n")) { if (!--num) return tok; } return NULL; } // Parse char* line to settings se void ReadSettings(char *line, struct Settings &se) { se.m2 = atof(getfield(line, 1)); se.orderE1 = atoi(getfield(line, 2)); se.orderE2 = atoi(getfield(line, 3)); se.orderct1 = atoi(getfield(line, 4)); se.orderct2 = atoi(getfield(line, 5)); se.orderct3 = atoi(getfield(line, 6)); se.orderp2 = atoi(getfield(line, 7)); se.orderp3 = atoi(getfield(line, 8)); se.done = atoi(getfield(line, 9)); if (se.done) se.filename = string(getfield(line, 10)); } // Read schedule from external file to avoid recompiling void ReadSchedule(vector<Settings> &listSett) { listSett.clear(); FILE *stream = fopen("schedule.txt", "r"); char line[1024]; while (fgets(line, 1024, stream)) { char *tmp = strdup(line); struct Settings se; ReadSettings(tmp, se); listSett.push_back(se); free(tmp); } fclose(stream); } // Write new schedule-status to file void WriteSchedule(vector<Settings> listSett) { FILE *stream = fopen("schedule.txt", "w"); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { Settings curr = listSett[i]; if (curr.done) fprintf(stream, "%f %i %i %i %i %i %i " "%i %i %s\n", curr.m2, curr.orderE1, curr.orderE2, curr.orderct1, curr.orderct2, curr.orderct3, curr.orderp2, curr.orderp3, curr.done, 
curr.filename.c_str()); else fprintf( stream, "%f %i %i %i %i %i %i %i %i\n", curr.m2, curr.orderE1, curr.orderE2, curr.orderct1, curr.orderct2, curr.orderct3, curr.orderp2, curr.orderp3, curr.done); } fclose(stream); } // Check for new work and load settings int FetchNewWork(vector<Settings> &listSett) { ReadSchedule(listSett); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { if (!listSett[i].done) { sett = listSett[i]; printf("\n Found new Work\n"); return 1; } } return 0; } // Runs integrations as long as new work is scheduled void RunScheduledJobs() { srand(time(NULL)); vector<Settings> listSett; while (FetchNewWork(listSett)) { InitParameters(); EstimateRuntime(); string filenam = "Results/Scalar/" + to_string(rand() % 100000) + ".csv"; IntegrateOnGPU(filenam.c_str()); ReadSchedule(listSett); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { if (sett == listSett[i]) { listSett[i].done = 1; listSett[i].filename = filenam; break; } } WriteSchedule(listSett); } } // Serves as a quick test for debug-purposes void Quicktest() { sett.orderE1 = 10; sett.orderE2 = 10; sett.orderct1 = 10; sett.orderct2 = 10; sett.orderct3 = 10; sett.orderp2 = 10; sett.orderp3 = 10; sett.m2 = 0.01; EstimateRuntime(); InitParameters(); IntegrateOnGPU("/tmp/test.txt"); } int main(int argc, char *argv[]) { Quicktest(); return 0; }
5fb652fe141452d088f8c61101d216acda0901ef.cu
#include "legendre_rule.h" #include "matrixelements.h" #include <stdio.h> #include <vector> __device__ double me; __device__ double m2; __device__ double Gf; __device__ double g; __device__ int orderE1; __device__ int orderE2; __device__ int orderct1; __device__ int orderct2; __device__ int orderct3; __device__ int orderp2; __device__ int orderp3; __device__ double m; struct Settings { double me = 0.000511; double m2 = 0.01; double Gf = 0.0000116637; double g = 1.0; int orderE1 = 32; int orderE2 = 32; int orderct1 = 32; int orderct2 = 32; int orderct3 = 32; int orderp2 = 32; int orderp3 = 32; double m = 0.105660; int done = 0; string filename = " "; bool operator==(const Settings &a) const { return (m2 == a.m2 && done == a.done && orderE1 == a.orderE1 && orderE2 == a.orderE2 && orderct1 == a.orderct1 && orderct2 == a.orderct2 && orderct3 == a.orderct3 && orderp2 == a.orderp2 && orderp3 == a.orderp3); } } sett; // Calculate derivative appearing in the phasespace integral __device__ double Deriv(double E1, double E2, double ct1, double ct2, double ct3, double ph2, double ph3) { return 2 * (E1 + E2 - m - (ct2 * ct3 + cos(ph2 - ph3) * pow(1 - pow(ct2, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5)) * pow(pow(E2, 2) - pow(m2, 2), 0.5) - pow(pow(E1, 2) - pow(me, 2), 0.5) * (ct1 * ct3 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5) * sin(ph3))); } // Solve kinetic equation for E3 __device__ double CalcE3(double E1, double E2, double ct1, double ct2, double ct3, double ph2, double ph3) { return -(pow(E1 + E2 - m - (ct2 * ct3 + cos(ph2 - ph3) * pow(1 - pow(ct2, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5)) * pow(pow(E2, 2) - pow(m2, 2), 0.5) - pow(pow(E1, 2) - pow(me, 2), 0.5) * (ct1 * ct3 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct3, 2), 0.5) * sin(ph3)), -1) * (2 * E1 * E2 - 2 * E1 * m - 2 * E2 * m + pow(m, 2) + pow(m2, 2) + pow(me, 2) - 2 * pow(pow(E2, 2) - pow(m2, 2), 0.5) * pow(pow(E1, 2) - pow(me, 2), 0.5) * (ct1 * ct2 + pow(1 - pow(ct1, 2), 0.5) * pow(1 - pow(ct2, 2), 0.5) * 
sin(ph2)))) / 2.; } // Implements a test for physicality, thus avoiding complicated region __device__ int isPhysical(double E1, double E2, double E3) { if (E1 < me || E2 < 0 || E3 < 0 || E1 + E2 + E3 > m) return 0; return 1; } // Calculate the integrals on the GPU __global__ void CalcRes(double *d_wE1, double *d_xE1, double *d_wE2, double *d_xE2, double *d_wct1, double *d_xct1, double *d_wct2, double *d_xct2, double *d_wct3, double *d_xct3, double *d_wp2, double *d_xp2, double *d_wp3, double *d_xp3, double *d_res) { int i = blockIdx.x * blockDim.x + threadIdx.x; double weight = 0.0; d_res[i] = 0.0; double E1, E2, E3, ct1, ct2, ct3, ph2, ph3; if (i < orderE1 * orderE2 * orderct1) { int iE1 = i % orderE1; int iCt1 = (i % (orderct1 * orderE1)) / orderE1; int iE2 = i / (orderE1 * orderct1); for (int iCt2 = 0; iCt2 < orderct2; iCt2++) { for (int iCt3 = 0; iCt3 < orderct3; iCt3++) { for (int ip2 = 0; ip2 < orderp2; ip2++) { for (int ip3 = 0; ip3 < orderp3; ip3++) { E1 = d_xE1[iE1]; E2 = d_xE2[iE2]; ct1 = d_xct1[iCt1]; ct2 = d_xct2[iCt2]; ct3 = d_xct3[iCt3]; ph2 = d_xp2[ip2]; ph3 = d_xp3[ip3]; E3 = CalcE3(E1, E2, ct1, ct2, ct3, ph2, ph3); if (isPhysical(E1, E2, E3)) { weight = pow(2.0 * M_PI, -7) / 8.0 * sqrt(E1 * E1 - me * me) * sqrt(E2 * E2 - m2 * m2) * E3 / abs(Deriv(E1, E2, ct1, ct2, ct3, ph2, ph3)) * d_wct2[iCt2] * d_wct3[iCt3] * d_wp2[ip2] * d_wp3[ip3]; d_res[i] += weight * MatrixElements::M2ScalarToLepton( E1, E2, E3, ct1, ct2, ct3, ph2, ph3); } } } } } } } // Carry out all integrals on the GPU, pull back results void IntegrateOnGPU(const char *Filename) { // Allocate memory for the quadrature nodes and results double *d_wE1, *d_xE1, *d_wE2, *d_xE2, *d_wct1, *d_xct1, *d_wct2, *d_xct2, *d_wct3, *d_xct3, *d_wp2, *d_xp2, *d_wp3, *d_xp3, *d_res; double *wE1 = (double *)malloc(sett.orderE1 * sizeof(double)); double *xE1 = (double *)malloc(sett.orderE1 * sizeof(double)); double *wE2 = (double *)malloc(sett.orderE2 * sizeof(double)); double *xE2 = (double 
*)malloc(sett.orderE2 * sizeof(double)); double *wct1 = (double *)malloc(sett.orderct1 * sizeof(double)); double *xct1 = (double *)malloc(sett.orderct1 * sizeof(double)); double *wct2 = (double *)malloc(sett.orderct2 * sizeof(double)); double *xct2 = (double *)malloc(sett.orderct2 * sizeof(double)); double *wct3 = (double *)malloc(sett.orderct3 * sizeof(double)); double *xct3 = (double *)malloc(sett.orderct3 * sizeof(double)); double *wp2 = (double *)malloc(sett.orderp2 * sizeof(double)); double *xp2 = (double *)malloc(sett.orderp2 * sizeof(double)); double *wp3 = (double *)malloc(sett.orderp3 * sizeof(double)); double *xp3 = (double *)malloc(sett.orderp3 * sizeof(double)); double *resVec = (double *)malloc(sett.orderE1 * sett.orderE2 * sett.orderct1 * sizeof(double)); // Allocate memory on the GPU for the nodes and results cudaMalloc(&d_wE1, sett.orderE1 * sizeof(double)); cudaMalloc(&d_xE1, sett.orderE1 * sizeof(double)); cudaMalloc(&d_wE2, sett.orderE2 * sizeof(double)); cudaMalloc(&d_xE2, sett.orderE2 * sizeof(double)); cudaMalloc(&d_wct1, sett.orderct1 * sizeof(double)); cudaMalloc(&d_xct1, sett.orderct1 * sizeof(double)); cudaMalloc(&d_wct2, sett.orderct2 * sizeof(double)); cudaMalloc(&d_xct2, sett.orderct2 * sizeof(double)); cudaMalloc(&d_xct3, sett.orderct3 * sizeof(double)); cudaMalloc(&d_wct3, sett.orderct3 * sizeof(double)); cudaMalloc(&d_wp2, sett.orderp2 * sizeof(double)); cudaMalloc(&d_xp2, sett.orderp2 * sizeof(double)); cudaMalloc(&d_wp3, sett.orderp3 * sizeof(double)); cudaMalloc(&d_xp3, sett.orderp3 * sizeof(double)); cudaMalloc(&d_res, sett.orderE1 * sett.orderE2 * sett.orderct1 * sizeof(double)); // Calculate quadrature nodes cgqf(sett.orderct1, 1, 0, 0, -1.0, 1.0, xct1, wct1); cgqf(sett.orderct2, 1, 0, 0, -1.0, 1.0, xct2, wct2); cgqf(sett.orderct3, 1, 0, 0, -1.0, 1.0, xct3, wct3); cgqf(sett.orderp2, 1, 0, 0, 0.0, 2.0 * M_PI, xp2, wp2); cgqf(sett.orderp3, 1, 0, 0, 0.0, 2.0 * M_PI, xp3, wp3); cgqf(sett.orderE1, 1, 0, 0, sett.me, sett.m / 2, xE1, 
wE1); cgqf(sett.orderE2, 1, 0, 0, sett.m2, (sett.m + sett.m2) / 2.0, xE2, wE2); // Transfere to GPU cudaMemcpy(d_wE1, wE1, sett.orderE1 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xE1, xE1, sett.orderE1 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wE2, wE2, sett.orderE2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xE2, xE2, sett.orderE2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wct1, wct1, sett.orderct1 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xct1, xct1, sett.orderct1 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wct2, wct2, sett.orderct2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xct2, xct2, sett.orderct2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wct3, wct3, sett.orderct3 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xct3, xct3, sett.orderct3 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wp2, wp2, sett.orderp2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xp2, xp2, sett.orderp2 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_wp3, wp3, sett.orderp3 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_xp3, xp3, sett.orderp3 * sizeof(double), cudaMemcpyHostToDevice); // Break work into chunks and solve each on the GPU int N = sett.orderE1 * sett.orderE2 * sett.orderct1; CalcRes<<<(N + 511) / 512, 512>>>(d_wE1, d_xE1, d_wE2, d_xE2, d_wct1, d_xct1, d_wct2, d_xct2, d_wct3, d_xct3, d_wp2, d_xp2, d_wp3, d_xp3, d_res); // Pull back results from the GPU cudaMemcpy(resVec, d_res, N * sizeof(double), cudaMemcpyDeviceToHost); // Calculate the total decay width to crosscheck with montecarlo estimates double res = 0.0; for (int iE1 = 0; iE1 < sett.orderE1; iE1++) { for (int iCt1 = 0; iCt1 < sett.orderct1; iCt1++) { for (int iE2 = 0; iE2 < sett.orderE2; iE2++) { res += resVec[iE1 + iE2 * sett.orderE1 * sett.orderct1 + iCt1 * sett.orderE1] * wE1[iE1] * wE2[iE2] * wct1[iCt1] / (2.0 * sett.m); } } } // Write the complete spectrum to a file 
FILE *pFile; pFile = fopen(Filename, "w"); fprintf(pFile, "###### %.12E %f %f %i %i %i " "%i %i %i %i\n", res, sett.m2, sett.m, sett.orderE1, sett.orderct1, sett.orderE2, sett.orderct2, sett.orderct3, sett.orderp2, sett.orderp3); for (int iE1 = 0; iE1 < sett.orderE1; iE1++) { for (int iCt1 = 0; iCt1 < sett.orderct1; iCt1++) { double buff = 0.0; for (int iE2 = 0; iE2 < sett.orderE2; iE2++) { buff += resVec[iE1 + iE2 * sett.orderE1 * sett.orderct1 + iCt1 * sett.orderE1] * wE1[iE1] * wE2[iE2] * wct1[iCt1]; } fprintf(pFile, "%.12E,%.12E,%.12E\n", xE1[iE1] * 2.0 / sett.m, xct1[iCt1], buff); } } fclose(pFile); printf("Total Width : %E\n Created File : %s", res, Filename); // Free allocated memory free(wE1); free(xE1); free(wE2); free(xE2); free(wct1); free(xct1); free(wct2); free(xct2); free(wct3); free(xct3); free(wp2); free(xp2); free(wp3); free(xp3); cudaFree(d_wE1); cudaFree(d_xE1); cudaFree(d_wE2); cudaFree(d_xE2); cudaFree(d_wct1); cudaFree(d_xct1); cudaFree(d_wct2); cudaFree(d_xct2); cudaFree(d_wct3); cudaFree(d_xct3); cudaFree(d_wp2); cudaFree(d_xp2); cudaFree(d_wp3); cudaFree(d_xp3); } // Estimate the runtime of the given choice of number of nodes void EstimateRuntime() { double i = (5580000.0 * sett.orderE1 * sett.orderE2 * sett.orderct1 * sett.orderct2 * sett.orderct3 * sett.orderp2 * sett.orderp3) / 34359738368; printf("%ih%im%is%ims\n", (int)(i / (60 * 60 * 1000)), (int)(i / (1000 * 60)) % 60, (int)(i / 1000) % 60, ((int)i) % 1000); } // Initialise fixed parameters on the device void InitParameters() { cudaMemcpyToSymbol(m, &sett.m, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(me, &sett.me, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(m2, &sett.m2, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Gf, &sett.Gf, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(g, &sett.g, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderE1, &sett.orderE1, sizeof(int), 0, cudaMemcpyHostToDevice); 
cudaMemcpyToSymbol(orderE2, &sett.orderE2, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderct1, &sett.orderct1, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderct2, &sett.orderct2, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderct3, &sett.orderct3, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderp2, &sett.orderp2, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(orderp3, &sett.orderp3, sizeof(int), 0, cudaMemcpyHostToDevice); } // Read space-seperated field of a line const char *getfield(char *line, int num) { const char *tok; for (tok = strtok(strdup(line), " "); tok && *tok; tok = strtok(NULL, " \n")) { if (!--num) return tok; } return NULL; } // Parse char* line to settings se void ReadSettings(char *line, struct Settings &se) { se.m2 = atof(getfield(line, 1)); se.orderE1 = atoi(getfield(line, 2)); se.orderE2 = atoi(getfield(line, 3)); se.orderct1 = atoi(getfield(line, 4)); se.orderct2 = atoi(getfield(line, 5)); se.orderct3 = atoi(getfield(line, 6)); se.orderp2 = atoi(getfield(line, 7)); se.orderp3 = atoi(getfield(line, 8)); se.done = atoi(getfield(line, 9)); if (se.done) se.filename = string(getfield(line, 10)); } // Read schedule from external file to avoid recompiling void ReadSchedule(vector<Settings> &listSett) { listSett.clear(); FILE *stream = fopen("schedule.txt", "r"); char line[1024]; while (fgets(line, 1024, stream)) { char *tmp = strdup(line); struct Settings se; ReadSettings(tmp, se); listSett.push_back(se); free(tmp); } fclose(stream); } // Write new schedule-status to file void WriteSchedule(vector<Settings> listSett) { FILE *stream = fopen("schedule.txt", "w"); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { Settings curr = listSett[i]; if (curr.done) fprintf(stream, "%f %i %i %i %i %i %i " "%i %i %s\n", curr.m2, curr.orderE1, curr.orderE2, curr.orderct1, curr.orderct2, curr.orderct3, curr.orderp2, curr.orderp3, curr.done, curr.filename.c_str()); else 
fprintf( stream, "%f %i %i %i %i %i %i %i %i\n", curr.m2, curr.orderE1, curr.orderE2, curr.orderct1, curr.orderct2, curr.orderct3, curr.orderp2, curr.orderp3, curr.done); } fclose(stream); } // Check for new work and load settings int FetchNewWork(vector<Settings> &listSett) { ReadSchedule(listSett); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { if (!listSett[i].done) { sett = listSett[i]; printf("\n Found new Work\n"); return 1; } } return 0; } // Runs integrations as long as new work is scheduled void RunScheduledJobs() { srand(time(NULL)); vector<Settings> listSett; while (FetchNewWork(listSett)) { InitParameters(); EstimateRuntime(); string filenam = "Results/Scalar/" + to_string(rand() % 100000) + ".csv"; IntegrateOnGPU(filenam.c_str()); ReadSchedule(listSett); for (int i = 0; i < static_cast<int>(listSett.size()); i++) { if (sett == listSett[i]) { listSett[i].done = 1; listSett[i].filename = filenam; break; } } WriteSchedule(listSett); } } // Serves as a quick test for debug-purposes void Quicktest() { sett.orderE1 = 10; sett.orderE2 = 10; sett.orderct1 = 10; sett.orderct2 = 10; sett.orderct3 = 10; sett.orderp2 = 10; sett.orderp3 = 10; sett.m2 = 0.01; EstimateRuntime(); InitParameters(); IntegrateOnGPU("/tmp/test.txt"); } int main(int argc, char *argv[]) { Quicktest(); return 0; }
5c19d32665c6c4a0de246f31fe9d3612c6379729.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "calcularCRS.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *val = NULL; hipMalloc(&val, XSIZE*YSIZE); int *col_ind = NULL; hipMalloc(&col_ind, XSIZE*YSIZE); int *row_ptr = NULL; hipMalloc(&row_ptr, XSIZE*YSIZE); int *u = NULL; hipMalloc(&u, XSIZE*YSIZE); int *resultado = NULL; hipMalloc(&resultado, XSIZE*YSIZE); int l = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( calcularCRS), dim3(gridBlock),dim3(threadBlock), 0, 0, val,col_ind,row_ptr,u,resultado,l); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( calcularCRS), dim3(gridBlock),dim3(threadBlock), 0, 0, val,col_ind,row_ptr,u,resultado,l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( calcularCRS), dim3(gridBlock),dim3(threadBlock), 0, 0, 
val,col_ind,row_ptr,u,resultado,l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5c19d32665c6c4a0de246f31fe9d3612c6379729.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "calcularCRS.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *val = NULL; cudaMalloc(&val, XSIZE*YSIZE); int *col_ind = NULL; cudaMalloc(&col_ind, XSIZE*YSIZE); int *row_ptr = NULL; cudaMalloc(&row_ptr, XSIZE*YSIZE); int *u = NULL; cudaMalloc(&u, XSIZE*YSIZE); int *resultado = NULL; cudaMalloc(&resultado, XSIZE*YSIZE); int l = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); calcularCRS<<<gridBlock,threadBlock>>>(val,col_ind,row_ptr,u,resultado,l); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { calcularCRS<<<gridBlock,threadBlock>>>(val,col_ind,row_ptr,u,resultado,l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { calcularCRS<<<gridBlock,threadBlock>>>(val,col_ind,row_ptr,u,resultado,l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' 
<< ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6017a35718116910d191d6be1b42111e8bd56274.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GSRBBricks.h" int GSRBBricksCode(brickd& phi, brickd& phi_new, brickd& rhs, brickd& alpha, brickd& beta_i, brickd& beta_j, brickd& beta_k, brickd& lambda, brick_list& blist) { for (long o = 0; o < blist.len; ++o) { long b = blist.dat[o]; long i, j, k; for (k = 0; k < phi.info->dim_z; ++k) { for (j = 0; j < phi.info->dim_y; ++j) { for (i = 0; i < phi.info->dim_x; ++i) { float helmholtz = alpha.elem(b,k,j,i) * phi.elem(b,k,j,i) - H2INV * ( beta_i.elem(b,k, j, i+1) * (phi.elem(b,k, j, i+1) - phi.elem(b,k, j, i )) - beta_i.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k, j, i-1)) + beta_j.elem(b,k, j+1,i ) * (phi.elem(b,k, j+1,i ) - phi.elem(b,k, j, i )) - beta_j.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k, j-1,i )) + beta_k.elem(b,k+1,j, i ) * (phi.elem(b,k+1,j, i ) - phi.elem(b,k, j, i )) - beta_k.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k-1,j, i )) ); phi_new.elem(b,k,j,i) = phi.elem(b,k,j,i) - lambda.elem(b,k,j,i) * (helmholtz-rhs.elem(b,k,j,i)); } } } } return 1; } //long b = blist.dat[blockIdx.x]; // long lid = threadIdx.x & 31; __global__ void GSRBGenerated(struct ::brickd &phi, struct ::brickd &phi_new, struct ::brickd &rhs, struct ::brickd &alpha, struct ::brickd &beta_i, struct ::brickd &beta_j, struct ::brickd &beta_k, struct ::brickd &lambda, struct ::brick_list &blist) { long b = blist.dat[blockIdx.x]; long lid = threadIdx.x & 31; for (long o = 0; o < blist.len; ++o) { long b = blist.dat[o]; long i; long j; long k; { brick_info *binfo = phi_new.info; long b0 = binfo->adj[b][0]; long b1 = binfo->adj[b][1]; long b2 = binfo->adj[b][2]; long b3 = binfo->adj[b][3]; long b4 = binfo->adj[b][4]; long b5 = binfo->adj[b][5]; long b6 = binfo->adj[b][6]; long b7 = binfo->adj[b][7]; long b8 = binfo->adj[b][8]; long b9 = binfo->adj[b][9]; long b10 = binfo->adj[b][10]; long b11 = binfo->adj[b][11]; long b12 = binfo->adj[b][12]; long b13 = b; 
long b14 = binfo->adj[b][13]; long b15 = binfo->adj[b][14]; long b16 = binfo->adj[b][15]; long b17 = binfo->adj[b][16]; long b18 = binfo->adj[b][17]; long b19 = binfo->adj[b][18]; long b20 = binfo->adj[b][19]; long b21 = binfo->adj[b][20]; long b22 = binfo->adj[b][21]; long b23 = binfo->adj[b][22]; long b24 = binfo->adj[b][23]; long b25 = binfo->adj[b][24]; long b26 = binfo->adj[b][25]; bElem buf0[2]; bElem buf1[2]; bElem buf2[2]; bElem buf3[2]; bElem buf4[2]; bElem buf5[2]; bElem buf6[2]; bElem buf7[2]; bElem buf8[2]; bElem buf9[2]; { for (long iit = 0; iit < 2; ++iit) buf0[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf1[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf3[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf4[iit] = 0; float phi_1_0_0_l; float phi_1_0_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 2, 4, 4 { float phi_1_0_0; { { phi_1_0_0_l = phi.dat[b12 * phi.step + 0 + rel * 32 + lid]; } { phi_1_0_0_r = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } cu_shl(phi_1_0_0, phi_1_0_0_l, phi_1_0_0_r, 1, 4, lid & 3); } { buf1[rel + 0] -= phi_1_0_0; } } // Alignment 3, 4, 4 { float phi_1_0_0; { phi_1_0_0_l = phi_1_0_0_r; phi_1_0_0 = phi_1_0_0_l; } { buf0[rel + 0] -= phi_1_0_0; buf1[rel + 0] -= phi_1_0_0; } } // Alignment 4, 4, 4 { float phi_1_0_0; { { phi_1_0_0_r = phi.dat[b14 * phi.step + 0 + rel * 32 + lid]; } cu_shl(phi_1_0_0, phi_1_0_0_l, phi_1_0_0_r, 3, 4, lid & 3); } { buf0[rel + 0] -= phi_1_0_0; } } } irel2 += 1; } } { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 3, 3, 4 { float phi_1_0_0; { { float tbelow; tbelow = phi.dat[b10 * phi.step + 0 + rel * 32 + lid]; float tabove; tabove = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; cu_shl(phi_1_0_0_l, tbelow, tabove, 4, 16, lid & 15); } phi_1_0_0 = phi_1_0_0_l; } { buf4[rel + 0] -= phi_1_0_0; } } // Alignment 3, 4, 4 { float phi_1_0_0; { { phi_1_0_0_l = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } phi_1_0_0 = 
phi_1_0_0_l; } { buf3[rel + 0] -= phi_1_0_0; buf4[rel + 0] -= phi_1_0_0; } } // Alignment 3, 5, 4 { float phi_1_0_0; { { float tbelow; tbelow = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; float tabove; tabove = phi.dat[b16 * phi.step + 0 + rel * 32 + lid]; cu_shl(phi_1_0_0_l, tbelow, tabove, 12, 16, lid & 15); } phi_1_0_0 = phi_1_0_0_l; } { buf3[rel + 0] -= phi_1_0_0; } } } irel2 += 1; } } } } { for (long iit = 0; iit < 2; ++iit) buf2[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf6[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf7[iit] = 0; float beta_i_1_0_0_l; float beta_i_1_0_0_r; float phi_0_0_1_l; float phi_0_0_1_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 3, 4, 4 { float beta_i_1_0_0; { { beta_i_1_0_0_l = beta_i.dat[b13 * beta_i.step + 0 + rel * 32 + lid]; } beta_i_1_0_0 = beta_i_1_0_0_l; } { buf2[rel + 0] -= beta_i_1_0_0 * buf1[0 + rel]; } } // Alignment 4, 4, 4 { float beta_i_1_0_0; { { beta_i_1_0_0_r = beta_i.dat[b14 * beta_i.step + 0 + rel * 32 + lid]; } cu_shl(beta_i_1_0_0, beta_i_1_0_0_l, beta_i_1_0_0_r, 3, 4, lid & 3); } { buf2[rel + 0] -= beta_i_1_0_0 * buf0[0 + rel]; } } } irel2 += 1; } } { { // Alignment 4, 4, 2 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b4 * phi.step + 32 + lid]; float tback; tback = phi.dat[b13 * phi.step + 0 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf7[0] -= phi_0_0_1; } } // Alignment 4, 4, 3 { float phi_0_0_1; { { phi_0_0_1_l = phi.dat[b13 * phi.step + 0 + lid]; } phi_0_0_1 = phi_0_0_1_l; } { buf6[0] -= phi_0_0_1; buf7[0] -= phi_0_0_1; } } // Alignment 4, 4, 4 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b13 * phi.step + 0 + lid]; float tback; tback = phi.dat[b13 * phi.step + 32 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf6[0] -= phi_0_0_1; buf7[1] -= phi_0_0_1; } } // Alignment 4, 4, 5 { float phi_0_0_1; { { phi_0_0_1_l = phi.dat[b13 * phi.step + 32 + 
lid]; } phi_0_0_1 = phi_0_0_1_l; } { buf6[1] -= phi_0_0_1; buf7[1] -= phi_0_0_1; } } // Alignment 4, 4, 6 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b13 * phi.step + 32 + lid]; float tback; tback = phi.dat[b22 * phi.step + 0 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf6[1] -= phi_0_0_1; } } } } } } { for (long iit = 0; iit < 2; ++iit) buf5[iit] = 0; float beta_j_0_1_0_l; float beta_j_0_1_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 4, 3, 4 { float beta_j_0_1_0; { { beta_j_0_1_0_l = beta_j.dat[b13 * beta_j.step + 0 + rel * 32 + lid]; } beta_j_0_1_0 = beta_j_0_1_0_l; } { buf5[rel + 0] -= beta_j_0_1_0 * buf4[0 + rel]; } } // Alignment 4, 4, 4 { float beta_j_0_1_0; { { float tbelow; tbelow = beta_j.dat[b13 * beta_j.step + 0 + rel * 32 + lid]; float tabove; tabove = beta_j.dat[b16 * beta_j.step + 0 + rel * 32 + lid]; cu_shl(beta_j_0_1_0_l, tbelow, tabove, 12, 16, lid & 15); } beta_j_0_1_0 = beta_j_0_1_0_l; } { buf5[rel + 0] -= buf2[0 + rel] + beta_j_0_1_0 * buf3[0 + rel]; } } } irel2 += 1; } } } } { for (long iit = 0; iit < 2; ++iit) buf8[iit] = 0; float beta_k_0_0_1_l; float beta_k_0_0_1_r; { { { // Alignment 4, 4, 3 { float beta_k_0_0_1; { { beta_k_0_0_1_l = beta_k.dat[b13 * beta_k.step + 0 + lid]; } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[0] -= beta_k_0_0_1 * buf7[0]; } } // Alignment 4, 4, 4 { float beta_k_0_0_1; { { { float tfront; tfront = beta_k.dat[b13 * beta_k.step + 0 + lid]; float tback; tback = beta_k.dat[b13 * beta_k.step + 32 + lid]; cu_shl(beta_k_0_0_1_l, tfront, tback, 16, 32, lid); } } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[0] -= buf5[0] + beta_k_0_0_1 * buf6[0]; } } // Alignment 4, 4, 5 { float beta_k_0_0_1; { { beta_k_0_0_1_l = beta_k.dat[b13 * beta_k.step + 32 + lid]; } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[1] -= beta_k_0_0_1 * buf7[1]; } } // Alignment 4, 4, 6 { float beta_k_0_0_1; { { { float tfront; tfront = beta_k.dat[b13 * 
beta_k.step + 32 + lid]; float tback; tback = beta_k.dat[b22 * beta_k.step + 0 + lid]; cu_shl(beta_k_0_0_1_l, tfront, tback, 16, 32, lid); } } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[1] -= buf5[1] + beta_k_0_0_1 * buf6[1]; } } } } } } { for (long iit = 0; iit < 2; ++iit) buf9[iit] = 0; float alpha_0_0_0_l; float alpha_0_0_0_r; float lambda_0_0_0_l; float lambda_0_0_0_r; float phi_0_0_0_l; float phi_0_0_0_r; float rhs_0_0_0_l; float rhs_0_0_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 4, 4, 4 { float phi_0_0_0; { { phi_0_0_0_l = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } phi_0_0_0 = phi_0_0_0_l; } float lambda_0_0_0; { { lambda_0_0_0_l = lambda.dat[b13 * lambda.step + 0 + rel * 32 + lid]; } lambda_0_0_0 = lambda_0_0_0_l; } float alpha_0_0_0; { { alpha_0_0_0_l = alpha.dat[b13 * alpha.step + 0 + rel * 32 + lid]; } alpha_0_0_0 = alpha_0_0_0_l; } float rhs_0_0_0; { { rhs_0_0_0_l = rhs.dat[b13 * rhs.step + 0 + rel * 32 + lid]; } rhs_0_0_0 = rhs_0_0_0_l; } { buf9[rel + 0] -= phi_0_0_0; buf9[rel + 0] -= lambda_0_0_0 * (alpha_0_0_0 * phi_0_0_0 - 0.800000012f * buf8[0 + rel] - rhs_0_0_0); } } } irel2 += 1; } } } } { bElem *stp = &phi_new.dat[phi_new.step * b]; for (long sti = 0; sti < 2; ++sti) stp[sti * 32 + lid] = buf9[sti]; } } } return; } int GSRBBricks(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda) { brick_info binfo(BDIM_Z, BDIM_Y, BDIM_X); // Create bricks according to the mapping brick_list blist = binfo.genList(n / BDIM_Z + 2, n / BDIM_Y + 2, n / BDIM_X + 2, RZ, RY, RX, BZ, BY, BX); // Need to convert double* to brickd* brickd bricks_phi (&binfo); brickd bricks_phi_new(&binfo); brickd bricks_rhs (&binfo); brickd bricks_alpha (&binfo); brickd bricks_beta_i (&binfo); brickd bricks_beta_j (&binfo); brickd bricks_beta_k (&binfo); brickd bricks_lambda (&binfo); bricks_phi.dat = (bElem*)phi; bricks_phi_new.dat = (bElem*)phi_new; 
bricks_rhs.dat = (bElem*)rhs; bricks_alpha.dat = (bElem*)alpha; bricks_beta_i.dat = (bElem*)beta_i; bricks_beta_j.dat = (bElem*)beta_j; bricks_beta_k.dat = (bElem*)beta_k; bricks_lambda.dat = (bElem*)lambda; printf("GSRBBricks Starting..\n"); auto t1 = std::chrono::high_resolution_clock::now(); for (int timestep = 0; timestep < 1; timestep++) { hipLaunchKernelGGL(( GSRBGenerated), dim3(blist.len), dim3(32), 0, 0, bricks_phi, bricks_phi_new, bricks_rhs, bricks_alpha, bricks_beta_i, bricks_beta_j, bricks_beta_k, bricks_lambda, blist); hipLaunchKernelGGL(( GSRBGenerated), dim3(blist.len), dim3(32), 0, 0, bricks_phi, bricks_phi_new, bricks_rhs, bricks_alpha, bricks_beta_i, bricks_beta_j, bricks_beta_k, bricks_lambda, blist); // Swap Phi and Phi_new } auto t2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> fp_ms = t2 - t1; std::cout << "Brick Time is " << fp_ms.count() << " milliseconds\n"; phi = (double*)bricks_phi.dat; phi_new = (double*)bricks_phi_new.dat; rhs = (double*)bricks_rhs.dat; alpha = (double*)bricks_alpha.dat; beta_i = (double*)bricks_beta_i.dat; beta_j = (double*)bricks_beta_j.dat; beta_k = (double*)bricks_beta_k.dat; lambda = (double*)bricks_lambda.dat; return 1; }
6017a35718116910d191d6be1b42111e8bd56274.cu
#include "GSRBBricks.h" int GSRBBricksCode(brickd& phi, brickd& phi_new, brickd& rhs, brickd& alpha, brickd& beta_i, brickd& beta_j, brickd& beta_k, brickd& lambda, brick_list& blist) { for (long o = 0; o < blist.len; ++o) { long b = blist.dat[o]; long i, j, k; for (k = 0; k < phi.info->dim_z; ++k) { for (j = 0; j < phi.info->dim_y; ++j) { for (i = 0; i < phi.info->dim_x; ++i) { float helmholtz = alpha.elem(b,k,j,i) * phi.elem(b,k,j,i) - H2INV * ( beta_i.elem(b,k, j, i+1) * (phi.elem(b,k, j, i+1) - phi.elem(b,k, j, i )) - beta_i.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k, j, i-1)) + beta_j.elem(b,k, j+1,i ) * (phi.elem(b,k, j+1,i ) - phi.elem(b,k, j, i )) - beta_j.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k, j-1,i )) + beta_k.elem(b,k+1,j, i ) * (phi.elem(b,k+1,j, i ) - phi.elem(b,k, j, i )) - beta_k.elem(b,k, j, i ) * (phi.elem(b,k, j, i ) - phi.elem(b,k-1,j, i )) ); phi_new.elem(b,k,j,i) = phi.elem(b,k,j,i) - lambda.elem(b,k,j,i) * (helmholtz-rhs.elem(b,k,j,i)); } } } } return 1; } //long b = blist.dat[blockIdx.x]; // long lid = threadIdx.x & 31; __global__ void GSRBGenerated(struct ::brickd &phi, struct ::brickd &phi_new, struct ::brickd &rhs, struct ::brickd &alpha, struct ::brickd &beta_i, struct ::brickd &beta_j, struct ::brickd &beta_k, struct ::brickd &lambda, struct ::brick_list &blist) { long b = blist.dat[blockIdx.x]; long lid = threadIdx.x & 31; for (long o = 0; o < blist.len; ++o) { long b = blist.dat[o]; long i; long j; long k; { brick_info *binfo = phi_new.info; long b0 = binfo->adj[b][0]; long b1 = binfo->adj[b][1]; long b2 = binfo->adj[b][2]; long b3 = binfo->adj[b][3]; long b4 = binfo->adj[b][4]; long b5 = binfo->adj[b][5]; long b6 = binfo->adj[b][6]; long b7 = binfo->adj[b][7]; long b8 = binfo->adj[b][8]; long b9 = binfo->adj[b][9]; long b10 = binfo->adj[b][10]; long b11 = binfo->adj[b][11]; long b12 = binfo->adj[b][12]; long b13 = b; long b14 = binfo->adj[b][13]; long b15 = binfo->adj[b][14]; long b16 = 
binfo->adj[b][15]; long b17 = binfo->adj[b][16]; long b18 = binfo->adj[b][17]; long b19 = binfo->adj[b][18]; long b20 = binfo->adj[b][19]; long b21 = binfo->adj[b][20]; long b22 = binfo->adj[b][21]; long b23 = binfo->adj[b][22]; long b24 = binfo->adj[b][23]; long b25 = binfo->adj[b][24]; long b26 = binfo->adj[b][25]; bElem buf0[2]; bElem buf1[2]; bElem buf2[2]; bElem buf3[2]; bElem buf4[2]; bElem buf5[2]; bElem buf6[2]; bElem buf7[2]; bElem buf8[2]; bElem buf9[2]; { for (long iit = 0; iit < 2; ++iit) buf0[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf1[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf3[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf4[iit] = 0; float phi_1_0_0_l; float phi_1_0_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 2, 4, 4 { float phi_1_0_0; { { phi_1_0_0_l = phi.dat[b12 * phi.step + 0 + rel * 32 + lid]; } { phi_1_0_0_r = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } cu_shl(phi_1_0_0, phi_1_0_0_l, phi_1_0_0_r, 1, 4, lid & 3); } { buf1[rel + 0] -= phi_1_0_0; } } // Alignment 3, 4, 4 { float phi_1_0_0; { phi_1_0_0_l = phi_1_0_0_r; phi_1_0_0 = phi_1_0_0_l; } { buf0[rel + 0] -= phi_1_0_0; buf1[rel + 0] -= phi_1_0_0; } } // Alignment 4, 4, 4 { float phi_1_0_0; { { phi_1_0_0_r = phi.dat[b14 * phi.step + 0 + rel * 32 + lid]; } cu_shl(phi_1_0_0, phi_1_0_0_l, phi_1_0_0_r, 3, 4, lid & 3); } { buf0[rel + 0] -= phi_1_0_0; } } } irel2 += 1; } } { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 3, 3, 4 { float phi_1_0_0; { { float tbelow; tbelow = phi.dat[b10 * phi.step + 0 + rel * 32 + lid]; float tabove; tabove = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; cu_shl(phi_1_0_0_l, tbelow, tabove, 4, 16, lid & 15); } phi_1_0_0 = phi_1_0_0_l; } { buf4[rel + 0] -= phi_1_0_0; } } // Alignment 3, 4, 4 { float phi_1_0_0; { { phi_1_0_0_l = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } phi_1_0_0 = phi_1_0_0_l; } { buf3[rel + 0] -= phi_1_0_0; buf4[rel + 0] -= 
phi_1_0_0; } } // Alignment 3, 5, 4 { float phi_1_0_0; { { float tbelow; tbelow = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; float tabove; tabove = phi.dat[b16 * phi.step + 0 + rel * 32 + lid]; cu_shl(phi_1_0_0_l, tbelow, tabove, 12, 16, lid & 15); } phi_1_0_0 = phi_1_0_0_l; } { buf3[rel + 0] -= phi_1_0_0; } } } irel2 += 1; } } } } { for (long iit = 0; iit < 2; ++iit) buf2[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf6[iit] = 0; for (long iit = 0; iit < 2; ++iit) buf7[iit] = 0; float beta_i_1_0_0_l; float beta_i_1_0_0_r; float phi_0_0_1_l; float phi_0_0_1_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 3, 4, 4 { float beta_i_1_0_0; { { beta_i_1_0_0_l = beta_i.dat[b13 * beta_i.step + 0 + rel * 32 + lid]; } beta_i_1_0_0 = beta_i_1_0_0_l; } { buf2[rel + 0] -= beta_i_1_0_0 * buf1[0 + rel]; } } // Alignment 4, 4, 4 { float beta_i_1_0_0; { { beta_i_1_0_0_r = beta_i.dat[b14 * beta_i.step + 0 + rel * 32 + lid]; } cu_shl(beta_i_1_0_0, beta_i_1_0_0_l, beta_i_1_0_0_r, 3, 4, lid & 3); } { buf2[rel + 0] -= beta_i_1_0_0 * buf0[0 + rel]; } } } irel2 += 1; } } { { // Alignment 4, 4, 2 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b4 * phi.step + 32 + lid]; float tback; tback = phi.dat[b13 * phi.step + 0 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf7[0] -= phi_0_0_1; } } // Alignment 4, 4, 3 { float phi_0_0_1; { { phi_0_0_1_l = phi.dat[b13 * phi.step + 0 + lid]; } phi_0_0_1 = phi_0_0_1_l; } { buf6[0] -= phi_0_0_1; buf7[0] -= phi_0_0_1; } } // Alignment 4, 4, 4 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b13 * phi.step + 0 + lid]; float tback; tback = phi.dat[b13 * phi.step + 32 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf6[0] -= phi_0_0_1; buf7[1] -= phi_0_0_1; } } // Alignment 4, 4, 5 { float phi_0_0_1; { { phi_0_0_1_l = phi.dat[b13 * phi.step + 32 + lid]; } phi_0_0_1 = phi_0_0_1_l; } { buf6[1] -= phi_0_0_1; 
buf7[1] -= phi_0_0_1; } } // Alignment 4, 4, 6 { float phi_0_0_1; { { { float tfront; tfront = phi.dat[b13 * phi.step + 32 + lid]; float tback; tback = phi.dat[b22 * phi.step + 0 + lid]; cu_shl(phi_0_0_1_l, tfront, tback, 16, 32, lid); } } phi_0_0_1 = phi_0_0_1_l; } { buf6[1] -= phi_0_0_1; } } } } } } { for (long iit = 0; iit < 2; ++iit) buf5[iit] = 0; float beta_j_0_1_0_l; float beta_j_0_1_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 4, 3, 4 { float beta_j_0_1_0; { { beta_j_0_1_0_l = beta_j.dat[b13 * beta_j.step + 0 + rel * 32 + lid]; } beta_j_0_1_0 = beta_j_0_1_0_l; } { buf5[rel + 0] -= beta_j_0_1_0 * buf4[0 + rel]; } } // Alignment 4, 4, 4 { float beta_j_0_1_0; { { float tbelow; tbelow = beta_j.dat[b13 * beta_j.step + 0 + rel * 32 + lid]; float tabove; tabove = beta_j.dat[b16 * beta_j.step + 0 + rel * 32 + lid]; cu_shl(beta_j_0_1_0_l, tbelow, tabove, 12, 16, lid & 15); } beta_j_0_1_0 = beta_j_0_1_0_l; } { buf5[rel + 0] -= buf2[0 + rel] + beta_j_0_1_0 * buf3[0 + rel]; } } } irel2 += 1; } } } } { for (long iit = 0; iit < 2; ++iit) buf8[iit] = 0; float beta_k_0_0_1_l; float beta_k_0_0_1_r; { { { // Alignment 4, 4, 3 { float beta_k_0_0_1; { { beta_k_0_0_1_l = beta_k.dat[b13 * beta_k.step + 0 + lid]; } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[0] -= beta_k_0_0_1 * buf7[0]; } } // Alignment 4, 4, 4 { float beta_k_0_0_1; { { { float tfront; tfront = beta_k.dat[b13 * beta_k.step + 0 + lid]; float tback; tback = beta_k.dat[b13 * beta_k.step + 32 + lid]; cu_shl(beta_k_0_0_1_l, tfront, tback, 16, 32, lid); } } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[0] -= buf5[0] + beta_k_0_0_1 * buf6[0]; } } // Alignment 4, 4, 5 { float beta_k_0_0_1; { { beta_k_0_0_1_l = beta_k.dat[b13 * beta_k.step + 32 + lid]; } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[1] -= beta_k_0_0_1 * buf7[1]; } } // Alignment 4, 4, 6 { float beta_k_0_0_1; { { { float tfront; tfront = beta_k.dat[b13 * beta_k.step + 32 + lid]; float tback; tback = beta_k.dat[b22 * 
beta_k.step + 0 + lid]; cu_shl(beta_k_0_0_1_l, tfront, tback, 16, 32, lid); } } beta_k_0_0_1 = beta_k_0_0_1_l; } { buf8[1] -= buf5[1] + beta_k_0_0_1 * buf6[1]; } } } } } } { for (long iit = 0; iit < 2; ++iit) buf9[iit] = 0; float alpha_0_0_0_l; float alpha_0_0_0_r; float lambda_0_0_0_l; float lambda_0_0_0_r; float phi_0_0_0_l; float phi_0_0_0_r; float rhs_0_0_0_l; float rhs_0_0_0_r; { { long irel2 = 0; for (long zit = 0; zit < 4; zit += 2) { long rel = irel2; { // Alignment 4, 4, 4 { float phi_0_0_0; { { phi_0_0_0_l = phi.dat[b13 * phi.step + 0 + rel * 32 + lid]; } phi_0_0_0 = phi_0_0_0_l; } float lambda_0_0_0; { { lambda_0_0_0_l = lambda.dat[b13 * lambda.step + 0 + rel * 32 + lid]; } lambda_0_0_0 = lambda_0_0_0_l; } float alpha_0_0_0; { { alpha_0_0_0_l = alpha.dat[b13 * alpha.step + 0 + rel * 32 + lid]; } alpha_0_0_0 = alpha_0_0_0_l; } float rhs_0_0_0; { { rhs_0_0_0_l = rhs.dat[b13 * rhs.step + 0 + rel * 32 + lid]; } rhs_0_0_0 = rhs_0_0_0_l; } { buf9[rel + 0] -= phi_0_0_0; buf9[rel + 0] -= lambda_0_0_0 * (alpha_0_0_0 * phi_0_0_0 - 0.800000012f * buf8[0 + rel] - rhs_0_0_0); } } } irel2 += 1; } } } } { bElem *stp = &phi_new.dat[phi_new.step * b]; for (long sti = 0; sti < 2; ++sti) stp[sti * 32 + lid] = buf9[sti]; } } } return; } int GSRBBricks(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda) { brick_info binfo(BDIM_Z, BDIM_Y, BDIM_X); // Create bricks according to the mapping brick_list blist = binfo.genList(n / BDIM_Z + 2, n / BDIM_Y + 2, n / BDIM_X + 2, RZ, RY, RX, BZ, BY, BX); // Need to convert double* to brickd* brickd bricks_phi (&binfo); brickd bricks_phi_new(&binfo); brickd bricks_rhs (&binfo); brickd bricks_alpha (&binfo); brickd bricks_beta_i (&binfo); brickd bricks_beta_j (&binfo); brickd bricks_beta_k (&binfo); brickd bricks_lambda (&binfo); bricks_phi.dat = (bElem*)phi; bricks_phi_new.dat = (bElem*)phi_new; bricks_rhs.dat = (bElem*)rhs; bricks_alpha.dat = (bElem*)alpha; 
bricks_beta_i.dat = (bElem*)beta_i; bricks_beta_j.dat = (bElem*)beta_j; bricks_beta_k.dat = (bElem*)beta_k; bricks_lambda.dat = (bElem*)lambda; printf("GSRBBricks Starting..\n"); auto t1 = std::chrono::high_resolution_clock::now(); for (int timestep = 0; timestep < 1; timestep++) { GSRBGenerated<<<blist.len, 32>>>(bricks_phi, bricks_phi_new, bricks_rhs, bricks_alpha, bricks_beta_i, bricks_beta_j, bricks_beta_k, bricks_lambda, blist); GSRBGenerated<<<blist.len, 32>>>(bricks_phi, bricks_phi_new, bricks_rhs, bricks_alpha, bricks_beta_i, bricks_beta_j, bricks_beta_k, bricks_lambda, blist); // Swap Phi and Phi_new } auto t2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> fp_ms = t2 - t1; std::cout << "Brick Time is " << fp_ms.count() << " milliseconds\n"; phi = (double*)bricks_phi.dat; phi_new = (double*)bricks_phi_new.dat; rhs = (double*)bricks_rhs.dat; alpha = (double*)bricks_alpha.dat; beta_i = (double*)bricks_beta_i.dat; beta_j = (double*)bricks_beta_j.dat; beta_k = (double*)bricks_beta_k.dat; lambda = (double*)bricks_lambda.dat; return 1; }
d97cba6937654298230bdea917544bbdded6c406.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlarfgx-v2.cu normal z -> c, Fri Jul 18 17:34:12 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_c __global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c, magmaFloatComplex *dwork, magmaFloatComplex *tau); __global__ void magma_ctrmv_kernel(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v); __global__ void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y, magmaFloatComplex *tau); //============================================================================== __global__ void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaFloatComplex scale; __shared__ float xnorm; magmaFloatComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 || n == 1) { *dtau = MAGMA_C_ZERO; *dA = *dx0; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) float alpha = *dx0; // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = (beta - alpha) / beta; //*dx0 = 1.; *dA = beta; } scale = 1. 
/ (alpha - beta); #else magmaFloatComplex alpha = *dx0; float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha); // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_C_MAKE( 1., 0.); *dA = MAGMA_C_MAKE(beta, 0.); } alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha)); scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_C_MUL(dxi, scale); if (j<it){ *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_C_MAKE(0., 0.); } } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's clarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_clarfgx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, magma_int_t it) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_clarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, it); } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. 
The difference with LAPACK's clarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_clarfgtx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, magma_int_t i, magmaFloatComplex *V, magma_int_t ldv, magmaFloatComplex *T, magma_int_t ldt, magmaFloatComplex *work) { /* Generate the elementary reflector H(i) */ magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i); if (i==0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1); magma_csetmatrix(1,1, &tt,1, dx0,1); } else { /* Compute the i-th column of T */ hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, work, dtau); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, ldt, work, T+i*ldt, dtau); } } //==============================================================================
d97cba6937654298230bdea917544bbdded6c406.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlarfgx-v2.cu normal z -> c, Fri Jul 18 17:34:12 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define PRECISION_c __global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c, magmaFloatComplex *dwork, magmaFloatComplex *tau); __global__ void magma_ctrmv_kernel(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v); __global__ void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y, magmaFloatComplex *tau); //============================================================================== __global__ void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaFloatComplex scale; __shared__ float xnorm; magmaFloatComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; if ( xnorm == 0 || n == 1) { *dtau = MAGMA_C_ZERO; *dA = *dx0; } else { #if (defined(PRECISION_s) || defined(PRECISION_d)) float alpha = *dx0; // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = (beta - alpha) / beta; //*dx0 = 1.; *dA = beta; } scale = 1. 
/ (alpha - beta); #else magmaFloatComplex alpha = *dx0; float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha); // no need to compute the norm as it is passed as input float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j==0){ *dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_C_MAKE( 1., 0.); *dA = MAGMA_C_MAKE(beta, 0.); } alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha)); scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_C_MUL(dxi, scale); if (j<it){ *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_C_MAKE(0., 0.); } } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's clarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_clarfgx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, magma_int_t it) { dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE); dim3 threads( BLOCK_SIZE ); magma_clarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, it); } //============================================================================== /* Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. 
The difference with LAPACK's clarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). */ extern "C" void magma_clarfgtx_gpu(magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx, magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dA, magma_int_t i, magmaFloatComplex *V, magma_int_t ldv, magmaFloatComplex *T, magma_int_t ldt, magmaFloatComplex *work) { /* Generate the elementary reflector H(i) */ magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i); if (i==0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1); magma_csetmatrix(1,1, &tt,1, dx0,1); } else { /* Compute the i-th column of T */ magma_cgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>(n, V, ldv, dx0, work, dtau); magma_ctrmv_kernel2<<< i, i, 0, magma_stream >>>( T, ldt, work, T+i*ldt, dtau); } } //==============================================================================
c0587aceb668a4beefb4cbc9a55e048faceba2fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <cxh@illinois.edu> #define BFS_VARIANT "topo_lb" #include "bfs.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include <hipcub/hipcub.hpp> #include "timer.h" typedef hipcub::BlockScan<int, BLOCK_SIZE> BlockScan; __device__ __forceinline__ void process_edge(int depth, int edge, int *column_indices, DistT *dist, bool *changed) { int dst = column_indices[edge]; //assert(dst < m); if (dist[dst] > depth) { //if(dist[dst] == MYINFINITY) { dist[dst] = depth; *changed = true; } } __device__ void expandByCta(int m, int *row_offsets, int *column_indices, DistT *dist, bool *visited, bool *expanded, int depth, bool *changed) { int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex = id; __shared__ int owner; __shared__ int sh_vertex; owner = -1; int size = 0; if(vertex < m && visited[vertex] && !expanded[vertex]) { size = row_offsets[vertex + 1] - row_offsets[vertex]; } while(true) { if(size > BLOCK_SIZE) owner = threadIdx.x; __syncthreads(); if(owner == -1) break; __syncthreads(); if(owner == threadIdx.x) { sh_vertex = vertex; expanded[id] = true; owner = -1; size = 0; } __syncthreads(); int row_begin = row_offsets[sh_vertex]; int row_end = row_offsets[sh_vertex + 1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + blockDim.x - 1) / blockDim.x) * blockDim.x; for(int i = threadIdx.x; i < num; i += blockDim.x) { int edge = row_begin + i; if(i < neighbor_size) { process_edge(depth, edge, column_indices, dist, changed); } } } } __device__ __forceinline__ unsigned LaneId() { unsigned ret; asm("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } __device__ __forceinline__ void expandByWarp(int m, int *row_offsets, int *column_indices, DistT *dist, bool *visited, bool *expanded, int depth, bool *changed) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned warp_id 
= threadIdx.x >> LOG_WARP_SIZE; unsigned lane_id = LaneId(); __shared__ int owner[NUM_WARPS]; __shared__ int sh_vertex[NUM_WARPS]; owner[warp_id] = -1; int size = 0; int vertex = id; if(vertex < m && visited[vertex] && !expanded[vertex]) { size = row_offsets[vertex + 1] - row_offsets[vertex]; } while(__any(size) >= WARP_SIZE) { if(size >= WARP_SIZE) owner[warp_id] = lane_id; if(owner[warp_id] == lane_id) { sh_vertex[warp_id] = vertex; expanded[id] = true; owner[warp_id] = -1; size = 0; } int winner = sh_vertex[warp_id]; int row_begin = row_offsets[winner]; int row_end = row_offsets[winner + 1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE; for(int i = lane_id; i < num; i+= WARP_SIZE) { int edge = row_begin + i; if(i < neighbor_size) { process_edge(depth, edge, column_indices, dist, changed); } } } } __global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, bool *changed, bool *visited, bool *expanded, int *frontier_size, int depth) { expandByCta(m, row_offsets, column_indices, dist, visited, expanded, depth, changed); expandByWarp(m, row_offsets, column_indices, dist, visited, expanded, depth, changed); int tid = blockIdx.x * blockDim.x + threadIdx.x; int src = tid; const int SCRATCHSIZE = BLOCK_SIZE; __shared__ BlockScan::TempStorage temp_storage; __shared__ int gather_offsets[SCRATCHSIZE]; gather_offsets[threadIdx.x] = 0; int neighbor_size = 0; int neighbor_offset = 0; int scratch_offset = 0; int total_edges = 0; if(src < m && visited[src] && !expanded[src]) { // visited but not expanded expanded[src] = true; neighbor_offset = row_offsets[src]; neighbor_size = row_offsets[src+1] - neighbor_offset; } BlockScan(temp_storage).ExclusiveSum(neighbor_size, scratch_offset, total_edges); int done = 0; int neighbors_done = 0; while(total_edges > 0) { __syncthreads(); int i; for(i = 0; neighbors_done + i < neighbor_size && (scratch_offset + i - done) < SCRATCHSIZE; i++) { 
gather_offsets[scratch_offset + i - done] = neighbor_offset + neighbors_done + i; } neighbors_done += i; scratch_offset += i; __syncthreads(); int edge = gather_offsets[threadIdx.x]; if(threadIdx.x < total_edges) { process_edge(depth, edge, column_indices, dist, changed); } total_edges -= BLOCK_SIZE; done += BLOCK_SIZE; } } __global__ void bfs_update(int m, DistT *dist, bool *visited) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < m) { if(dist[id] < MYINFINITY && !visited[id]) visited[id] = true; } } void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) { //print_device_info(0); DistT zero = 0; bool one = 1; int *d_row_offsets, *d_column_indices; CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice)); DistT * d_dist; CUDA_SAFE_CALL(hipMalloc((void **)&d_dist, m * sizeof(DistT))); CUDA_SAFE_CALL(hipMemcpy(d_dist, h_dist, m * sizeof(DistT), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(&d_dist[source], &zero, sizeof(DistT), hipMemcpyHostToDevice)); bool *d_changed, h_changed, *d_visited, *d_expanded; CUDA_SAFE_CALL(hipMalloc((void **)&d_changed, sizeof(bool))); CUDA_SAFE_CALL(hipMalloc((void **)&d_visited, m * sizeof(bool))); CUDA_SAFE_CALL(hipMalloc((void **)&d_expanded, m * sizeof(bool))); CUDA_SAFE_CALL(hipMemset(d_visited, 0, m * sizeof(bool))); CUDA_SAFE_CALL(hipMemcpy(&d_visited[source], &one, sizeof(bool), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemset(d_expanded, 0, m * sizeof(bool))); int *d_frontier_size; CUDA_SAFE_CALL(hipMalloc((void **)&d_frontier_size, sizeof(int))); CUDA_SAFE_CALL(hipDeviceSynchronize()); int iter = 0; int nthreads = 
BLOCK_SIZE; int nblocks = (m - 1) / nthreads + 1; //int h_frontier_size = 1; printf("Launching CUDA BFS solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads); Timer t; t.Start(); do { ++ iter; h_changed = false; CUDA_SAFE_CALL(hipMemcpy(d_changed, &h_changed, sizeof(bool), hipMemcpyHostToDevice)); //CUDA_SAFE_CALL(hipMemcpy(d_frontier_size, &zero, sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( bfs_kernel) , dim3(nblocks), dim3(nthreads), 0, 0, m, d_row_offsets, d_column_indices, d_dist, d_changed, d_visited, d_expanded, d_frontier_size, iter); hipLaunchKernelGGL(( bfs_update) , dim3(nblocks), dim3(nthreads), 0, 0, m, d_dist, d_visited); CudaTest("solving failed"); CUDA_SAFE_CALL(hipMemcpy(&h_changed, d_changed, sizeof(bool), hipMemcpyDeviceToHost)); //CUDA_SAFE_CALL(hipMemcpy(&h_frontier_size, d_frontier_size, sizeof(int), hipMemcpyDeviceToHost)); //printf("iteration=%d, frontier_size=%d\n", iter, h_frontier_size); } while (h_changed); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("\titerations = %d.\n", iter); printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(hipMemcpy(h_dist, d_dist, m * sizeof(DistT), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_row_offsets)); CUDA_SAFE_CALL(hipFree(d_column_indices)); CUDA_SAFE_CALL(hipFree(d_dist)); CUDA_SAFE_CALL(hipFree(d_changed)); CUDA_SAFE_CALL(hipFree(d_visited)); CUDA_SAFE_CALL(hipFree(d_expanded)); CUDA_SAFE_CALL(hipFree(d_frontier_size)); return; }
c0587aceb668a4beefb4cbc9a55e048faceba2fa.cu
// Copyright 2016, National University of Defense Technology
// Authors: Xuhao Chen <cxh@illinois.edu>
#define BFS_VARIANT "topo_lb"
#include "bfs.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include <cub/cub.cuh>
#include "timer.h"
typedef cub::BlockScan<int, BLOCK_SIZE> BlockScan;

// Relax one CSR edge at BFS level `depth`: if following the edge improves the
// destination's distance, record `depth` and flag that another top-level
// iteration is required.
__device__ __forceinline__ void process_edge(int depth, int edge, int *column_indices, DistT *dist, bool *changed) {
	int dst = column_indices[edge];
	//assert(dst < m);
	if (dist[dst] > depth) {
	//if(dist[dst] == MYINFINITY) {
		dist[dst] = depth;
		*changed = true;
	}
}

// CTA-level load balancing: frontier vertices whose degree exceeds BLOCK_SIZE
// are expanded one at a time by the whole thread block. A winning thread
// publishes its vertex through shared memory and marks it expanded; all
// threads then strip-mine that vertex's adjacency list together.
// Expects a 1-D launch with blockDim.x == BLOCK_SIZE.
__device__ void expandByCta(int m, int *row_offsets, int *column_indices, DistT *dist, bool *visited, bool *expanded, int depth, bool *changed) {
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	int vertex = id;
	__shared__ int owner;
	__shared__ int sh_vertex;
	owner = -1;
	int size = 0;
	if(vertex < m && visited[vertex] && !expanded[vertex]) {
		size = row_offsets[vertex + 1] - row_offsets[vertex];
	}
	while(true) {
		// Threads holding an oversized vertex race for ownership (last writer wins).
		if(size > BLOCK_SIZE) owner = threadIdx.x;
		__syncthreads();
		if(owner == -1) break; // no oversized frontier vertex left in this block
		__syncthreads();
		if(owner == threadIdx.x) {
			sh_vertex = vertex;
			expanded[id] = true; // claim the vertex so it is expanded exactly once
			owner = -1;
			size = 0;            // drop out of future rounds
		}
		__syncthreads();
		int row_begin = row_offsets[sh_vertex];
		int row_end = row_offsets[sh_vertex + 1];
		int neighbor_size = row_end - row_begin;
		// Round the trip count up to a multiple of blockDim.x so every thread
		// executes the same number of iterations.
		int num = ((neighbor_size + blockDim.x - 1) / blockDim.x) * blockDim.x;
		for(int i = threadIdx.x; i < num; i += blockDim.x) {
			int edge = row_begin + i;
			if(i < neighbor_size) {
				process_edge(depth, edge, column_indices, dist, changed);
			}
		}
	}
}

// Returns the calling thread's lane index within its warp.
__device__ __forceinline__ unsigned LaneId() {
	unsigned ret;
	// NOTE: '%' must be escaped as '%%' inside an inline-PTX template;
	// a single '%laneid' does not name the special register.
	asm("mov.u32 %0, %%laneid;" : "=r"(ret));
	return ret;
}

// Warp-level load balancing: vertices with degree >= WARP_SIZE (but not large
// enough to be claimed at CTA level) are expanded cooperatively by one warp.
// NOTE: uses the legacy mask-less __any(); valid pre-Volta only — use
// __any_sync(mask, pred) when targeting SM70+.
__device__ __forceinline__ void expandByWarp(int m, int *row_offsets, int *column_indices, DistT *dist, bool *visited, bool *expanded, int depth, bool *changed) {
	unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned warp_id = threadIdx.x >> LOG_WARP_SIZE;
	unsigned lane_id = LaneId();
	__shared__ int owner[NUM_WARPS];
	__shared__ int sh_vertex[NUM_WARPS];
	owner[warp_id] = -1;
	int size = 0;
	int vertex = id;
	if(vertex < m && visited[vertex] && !expanded[vertex]) {
		size = row_offsets[vertex + 1] - row_offsets[vertex];
	}
	// BUGFIX: the predicate must be evaluated per-lane *inside* __any().
	// The original `__any(size) >= WARP_SIZE` compared the 0/1 vote result
	// against WARP_SIZE, which is always false, so warp-level expansion
	// never executed.
	while(__any(size >= WARP_SIZE)) {
		if(size >= WARP_SIZE) owner[warp_id] = lane_id;
		if(owner[warp_id] == lane_id) {
			sh_vertex[warp_id] = vertex;
			expanded[id] = true;
			owner[warp_id] = -1;
			size = 0;
		}
		int winner = sh_vertex[warp_id];
		int row_begin = row_offsets[winner];
		int row_end = row_offsets[winner + 1];
		int neighbor_size = row_end - row_begin;
		int num = ((neighbor_size + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;
		for(int i = lane_id; i < num; i += WARP_SIZE) {
			int edge = row_begin + i;
			if(i < neighbor_size) {
				process_edge(depth, edge, column_indices, dist, changed);
			}
		}
	}
}

// Topology-driven BFS step with three-tier load balancing: CTA-sized vertices,
// then warp-sized vertices, then the remainder gathered into shared memory via
// a CUB block scan and processed one edge per thread.
// Expects a 1-D launch with blockDim.x == BLOCK_SIZE.
__global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, bool *changed, bool *visited, bool *expanded, int *frontier_size, int depth) {
	expandByCta(m, row_offsets, column_indices, dist, visited, expanded, depth, changed);
	expandByWarp(m, row_offsets, column_indices, dist, visited, expanded, depth, changed);
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int src = tid;
	const int SCRATCHSIZE = BLOCK_SIZE;
	__shared__ BlockScan::TempStorage temp_storage;
	__shared__ int gather_offsets[SCRATCHSIZE];
	gather_offsets[threadIdx.x] = 0;
	int neighbor_size = 0;
	int neighbor_offset = 0;
	int scratch_offset = 0;
	int total_edges = 0;
	if(src < m && visited[src] && !expanded[src]) { // visited but not expanded
		expanded[src] = true;
		neighbor_offset = row_offsets[src];
		neighbor_size = row_offsets[src+1] - neighbor_offset;
	}
	// Exclusive prefix sum over per-thread degrees gives each thread its slot
	// range in the shared gather buffer plus the block-wide edge total.
	BlockScan(temp_storage).ExclusiveSum(neighbor_size, scratch_offset, total_edges);
	int done = 0;
	int neighbors_done = 0;
	while(total_edges > 0) {
		__syncthreads();
		int i;
		// Fill the scratch buffer with as many of this thread's remaining
		// edge indices as fit into the current BLOCK_SIZE-wide window.
		for(i = 0; neighbors_done + i < neighbor_size && (scratch_offset + i - done) < SCRATCHSIZE; i++) {
			gather_offsets[scratch_offset + i - done] = neighbor_offset + neighbors_done + i;
		}
		neighbors_done += i;
		scratch_offset += i;
		__syncthreads();
		int edge = gather_offsets[threadIdx.x];
		if(threadIdx.x < total_edges) {
			process_edge(depth, edge, column_indices, dist, changed);
		}
		total_edges -= BLOCK_SIZE;
		done += BLOCK_SIZE;
	}
}

// Promote every vertex reached in the last step into the visited frontier.
__global__ void bfs_update(int m, DistT *dist, bool *visited) {
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	if (id < m) {
		if(dist[id] < MYINFINITY && !visited[id])
			visited[id] = true;
	}
}

// Host driver: copies the CSR graph and distance array to the device, runs
// level-synchronous BFS iterations until no distance changes, then copies the
// distances back. in_row_offsets/in_column_indices/h_degree are unused by this
// (topology-driven) variant but kept for interface compatibility.
void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) {
	//print_device_info(0);
	DistT zero = 0;
	bool one = 1;
	int *d_row_offsets, *d_column_indices;
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int)));
	CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice));
	DistT *d_dist;
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_dist, m * sizeof(DistT)));
	CUDA_SAFE_CALL(cudaMemcpy(d_dist, h_dist, m * sizeof(DistT), cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(&d_dist[source], &zero, sizeof(DistT), cudaMemcpyHostToDevice));
	bool *d_changed, h_changed, *d_visited, *d_expanded;
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_changed, sizeof(bool)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_visited, m * sizeof(bool)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_expanded, m * sizeof(bool)));
	CUDA_SAFE_CALL(cudaMemset(d_visited, 0, m * sizeof(bool)));
	CUDA_SAFE_CALL(cudaMemcpy(&d_visited[source], &one, sizeof(bool), cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemset(d_expanded, 0, m * sizeof(bool)));
	int *d_frontier_size;
	CUDA_SAFE_CALL(cudaMalloc((void **)&d_frontier_size, sizeof(int)));
	CUDA_SAFE_CALL(cudaDeviceSynchronize());
	int iter = 0;
	int nthreads = BLOCK_SIZE;
	int nblocks = (m - 1) / nthreads + 1; // ceil-div over the vertex set
	//int h_frontier_size = 1;
	printf("Launching CUDA BFS solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads);
	Timer t;
	t.Start();
	do {
		++ iter;
		h_changed = false;
		CUDA_SAFE_CALL(cudaMemcpy(d_changed, &h_changed, sizeof(bool), cudaMemcpyHostToDevice));
		//CUDA_SAFE_CALL(cudaMemcpy(d_frontier_size, &zero, sizeof(int), cudaMemcpyHostToDevice));
		bfs_kernel <<<nblocks, nthreads>>> (m, d_row_offsets, d_column_indices, d_dist, d_changed, d_visited, d_expanded, d_frontier_size, iter);
		bfs_update <<<nblocks, nthreads>>> (m, d_dist, d_visited);
		CudaTest("solving failed");
		CUDA_SAFE_CALL(cudaMemcpy(&h_changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost));
		//CUDA_SAFE_CALL(cudaMemcpy(&h_frontier_size, d_frontier_size, sizeof(int), cudaMemcpyDeviceToHost));
		//printf("iteration=%d, frontier_size=%d\n", iter, h_frontier_size);
	} while (h_changed);
	CUDA_SAFE_CALL(cudaDeviceSynchronize());
	t.Stop();
	printf("\titerations = %d.\n", iter);
	printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs());
	CUDA_SAFE_CALL(cudaMemcpy(h_dist, d_dist, m * sizeof(DistT), cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaFree(d_row_offsets));
	CUDA_SAFE_CALL(cudaFree(d_column_indices));
	CUDA_SAFE_CALL(cudaFree(d_dist));
	CUDA_SAFE_CALL(cudaFree(d_changed));
	CUDA_SAFE_CALL(cudaFree(d_visited));
	CUDA_SAFE_CALL(cudaFree(d_expanded));
	CUDA_SAFE_CALL(cudaFree(d_frontier_size));
	return;
}
91b443368662554fc0393f2a0a202be8cc073fb7.hip
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <float.h>

// HIP runtime
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <EasyBMP.h>
#include <EasyBMP.cpp>

#define BLOCK_DIM 16

// Per-pixel scratch record: the mean and dispersion of each of the 9 possible
// 3x3 masks that contain the pixel (rotating-mask smoothing filter).
typedef struct {
	float avgerages[9];
	float dispersions[9];
} Pair;

/* The device kernel, takes as input the noisy image and fills, for every
 * pixel, the average/dispersion of each 3x3 mask anchored at that pixel.
 * Launch: BLOCK_SIZE x BLOCK_SIZE blocks whose tiles overlap by 2 pixels
 * (each block produces BLOCK_SIZE-2 mask anchors per dimension). */
template<int BLOCK_SIZE>
__global__ void rotatingMaskCUDA(Pair * filtered, unsigned char * img, int rows, int cols) {
	__shared__ unsigned char input_img[BLOCK_SIZE][BLOCK_SIZE];

	// Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;

	/* Overlapping the tiles: shift each block back by 2 pixels per block so
	 * neighboring tiles share the 2-pixel halo needed by the 3x3 masks. */
	row -= 2 * by;
	col -= 2 * bx;

	if (row < rows && col < cols)
		input_img[ty][tx] = img[cols * row + col];
	__syncthreads();

	if (row < rows && col < cols) {
		float tmp_c = cols;
		float tmp_r = rows;
		int numberOfBlocksx = (int) ceil(tmp_c / (BLOCK_SIZE - 2));
		int numberOfBlocksy = (int) ceil(tmp_r / (BLOCK_SIZE - 2));
		// Check if this pixel should compute the average and the dispersion
		// (i.e. it anchors a full 3x3 mask inside both the tile and the image)
		if ((bx < numberOfBlocksx - 1
				|| (bx == numberOfBlocksx - 1 && (tx < cols - bx * (BLOCK_SIZE - 2) - 2)))
			&& (by < numberOfBlocksy - 1
				|| (by == numberOfBlocksy - 1 && (ty < rows - by * (BLOCK_SIZE - 2) - 2)))) {
			if (tx < BLOCK_SIZE - 2 && ty < BLOCK_SIZE - 2) {
				/* Average of the mask anchored (upper-left) at this pixel */
				float sum = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = tx + j;
						int tmp_row = ty + i;
						sum += input_img[tmp_row][tmp_col];
					}
				}
				float average = sum / 9;

				/* Dispersion (sum of squared deviations) of the same mask */
				float dispersion = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = tx + j;
						int tmp_row = ty + i;
						dispersion += (input_img[tmp_row][tmp_col] - average)
								* (input_img[tmp_row][tmp_col] - average);
					}
				}

				/* Scatter this mask's statistics to each of the 9 pixels it
				 * covers: slot 0 for the upper-left pixel, slot 1 for the
				 * upper-left-but-one pixel, and so on. */
				int index = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = col + j;
						int tmp_row = row + i;
						filtered[tmp_col + tmp_row * cols].avgerages[index] = average;
						filtered[tmp_col + tmp_row * cols].dispersions[index] = dispersion;
						index++;
					}
				}
			}
		}
	}
}

/* For each pixel, pick the average of the covering mask with minimum
 * dispersion and write it to the output image. */
template<int BLOCK_SIZE>
__global__ void getArrayMin(unsigned char * output_img, Pair * input_img, int rows, int cols) {
	/* Calculate the index of the 2d array */
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;

	// BUGFIX: the grid is sized with ceil(), so edge blocks contain threads
	// outside the image; without this guard they read and write out of bounds
	// whenever rows/cols is not a multiple of BLOCK_SIZE.
	if (row >= rows || col >= cols)
		return;

	float min = FLT_MAX;
	int min_index = 0;
	float *dispersions = input_img[col + row * cols].dispersions;
	// NOTE(review): slots never written by rotatingMaskCUDA (pixels near the
	// image border are covered by fewer than 9 masks) are uninitialized device
	// memory; the `tmp >= 0` test only filters them when that memory happens
	// to read as negative/NaN — confirm border handling against requirements.
	for (int i = 0; i < 9; i++) {
		float tmp = dispersions[i];
		if (tmp < min && tmp >= 0) {
			min = tmp;
			min_index = i;
		}
	}
	output_img[col + row * cols] = input_img[col + row * cols].avgerages[min_index];
}

/* Host wrapper: filters one 8-bit channel of size rows x cols on the GPU and
 * returns a newly malloc'd host buffer with the result (caller frees). */
unsigned char * init(unsigned char * img, int rows, int cols) {
	// Device input image and filtered image
	unsigned char *d_img, *filtered_img, *d_filtered;
	filtered_img = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);

	// The temporary matrix holding the averages
	// and dispersions for all 9 mask positions
	Pair * d_tmp;

	// Allocate and copy input image to device
	int size = rows * cols * sizeof(unsigned char);
	hipMalloc((void**) &d_img, size);
	hipMemcpy(d_img, img, size, hipMemcpyHostToDevice);

	// Allocate memory for output image
	hipMalloc((void**) &d_filtered, size);

	// Allocate memory for tmp matrix
	int size_pair = rows * cols * sizeof(Pair);
	hipMalloc((void**) &d_tmp, size_pair);

	// Define grid and block dimensions; tiles overlap by 2, hence BLOCK_DIM-2
	dim3 block(BLOCK_DIM, BLOCK_DIM, 1);
	dim3 grid((int) ceil((cols * 1.0) / (BLOCK_DIM - 2)),
			(int) ceil((rows * 1.0) / (BLOCK_DIM - 2)), 1);

	// Kernel invocations
	hipLaunchKernelGGL((rotatingMaskCUDA<BLOCK_DIM>), dim3(grid), dim3(block), 0, 0, d_tmp, d_img, rows, cols);
	dim3 grid2((int) ceil((cols * 1.0) / BLOCK_DIM), (int) ceil((rows * 1.0) / BLOCK_DIM), 1);
	hipLaunchKernelGGL((getArrayMin<BLOCK_DIM>), dim3(grid2), dim3(block), 0, 0, d_filtered, d_tmp, rows, cols);

	// Copy the filtered image to the host memory
	hipMemcpy(filtered_img, d_filtered, size, hipMemcpyDeviceToHost);

	// Free allocated memory
	hipFree(d_img);
	hipFree(d_tmp);
	hipFree(d_filtered);

	return filtered_img;
}

/* Verify a HIP-capable device is present and usable; prints device info or an
 * error and exits if the device is in prohibited compute mode. */
void checkCuda(int argc, char **argv) {
	printf("[Rotating mask technique for image filtering Using CUDA] - Starting...\n");

	// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
	int devID = 0;
	if (checkCmdLineFlag(argc, (const char **) argv, "device")) {
		devID = getCmdLineArgumentInt(argc, (const char **) argv, "device");
		hipSetDevice(devID);
	}

	hipError_t error;
	hipDeviceProp_t deviceProp;
	error = hipGetDevice(&devID);
	if (error != hipSuccess) {
		printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
	}

	error = hipGetDeviceProperties(&deviceProp, devID);
	if (deviceProp.computeMode == hipComputeModeProhibited) {
		fprintf(stderr,
				"Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
		exit(EXIT_SUCCESS);
	}
	if (error != hipSuccess) {
		printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
	} else {
		printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
				devID, deviceProp.name, deviceProp.major, deviceProp.minor);
	}
}

/* Entry point: load a BMP, filter each of the R/G/B/A channels independently
 * on the GPU, and write the result back out. */
int main(int argc, char **argv) {
	/* Check if CUDA is available */
	checkCuda(argc, argv);

	BMP imgIn, imgOut;
	imgIn.ReadFromFile("../../test images/lena_noise.bmp");
	int width = imgIn.TellWidth();
	int height = imgIn.TellHeight();

	unsigned char *pixelsIn_r, *pixelsIn_g, *pixelsIn_b, *pixelsIn_a;
	unsigned char *pixelsOut_r, *pixelsOut_g, *pixelsOut_b, *pixelsOut_a;

	// read the 4 channels R, G, B and A from the BMP object
	pixelsIn_r = imgIn.getPixelArray(Red);
	pixelsIn_g = imgIn.getPixelArray(Green);
	pixelsIn_b = imgIn.getPixelArray(Blue);
	pixelsIn_a = imgIn.getPixelArray(Alpha);

	// (optional hipEvent-based timing around the four init() calls was removed;
	// re-add hipEventRecord/hipEventElapsedTime here if measurements are needed)

	// compute the corresponding 4 channels after performing filtering
	pixelsOut_r = init(pixelsIn_r, height, width);
	pixelsOut_g = init(pixelsIn_g, height, width);
	pixelsOut_b = init(pixelsIn_b, height, width);
	pixelsOut_a = init(pixelsIn_a, height, width);

	// write the computed channels to a bmp image file
	imgOut.fromPixelArrays(pixelsOut_r, pixelsOut_g, pixelsOut_b, pixelsOut_a, width, height);
	imgOut.WriteToFile("../../output images/lena_noise_filtered.bmp");
	return 0;
}
91b443368662554fc0393f2a0a202be8cc073fb7.cu
// System includes
#include <stdio.h>
#include <assert.h>
#include <float.h>

// CUDA runtime
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <EasyBMP.h>
#include <EasyBMP.cpp>

#define BLOCK_DIM 16

// Per-pixel scratch record: the mean and dispersion of each of the 9 possible
// 3x3 masks that contain the pixel (rotating-mask smoothing filter).
typedef struct {
	float avgerages[9];
	float dispersions[9];
} Pair;

/* The device kernel, takes as input the noisy image and fills, for every
 * pixel, the average/dispersion of each 3x3 mask anchored at that pixel.
 * Launch: BLOCK_SIZE x BLOCK_SIZE blocks whose tiles overlap by 2 pixels
 * (each block produces BLOCK_SIZE-2 mask anchors per dimension). */
template<int BLOCK_SIZE>
__global__ void rotatingMaskCUDA(Pair * filtered, unsigned char * img, int rows, int cols) {
	__shared__ unsigned char input_img[BLOCK_SIZE][BLOCK_SIZE];

	// Block index
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;

	/* Overlapping the tiles: shift each block back by 2 pixels per block so
	 * neighboring tiles share the 2-pixel halo needed by the 3x3 masks. */
	row -= 2 * by;
	col -= 2 * bx;

	if (row < rows && col < cols)
		input_img[ty][tx] = img[cols * row + col];
	__syncthreads();

	if (row < rows && col < cols) {
		float tmp_c = cols;
		float tmp_r = rows;
		int numberOfBlocksx = (int) ceil(tmp_c / (BLOCK_SIZE - 2));
		int numberOfBlocksy = (int) ceil(tmp_r / (BLOCK_SIZE - 2));
		// Check if this pixel should compute the average and the dispersion
		// (i.e. it anchors a full 3x3 mask inside both the tile and the image)
		if ((bx < numberOfBlocksx - 1
				|| (bx == numberOfBlocksx - 1 && (tx < cols - bx * (BLOCK_SIZE - 2) - 2)))
			&& (by < numberOfBlocksy - 1
				|| (by == numberOfBlocksy - 1 && (ty < rows - by * (BLOCK_SIZE - 2) - 2)))) {
			if (tx < BLOCK_SIZE - 2 && ty < BLOCK_SIZE - 2) {
				/* Average of the mask anchored (upper-left) at this pixel */
				float sum = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = tx + j;
						int tmp_row = ty + i;
						sum += input_img[tmp_row][tmp_col];
					}
				}
				float average = sum / 9;

				/* Dispersion (sum of squared deviations) of the same mask */
				float dispersion = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = tx + j;
						int tmp_row = ty + i;
						dispersion += (input_img[tmp_row][tmp_col] - average)
								* (input_img[tmp_row][tmp_col] - average);
					}
				}

				/* Scatter this mask's statistics to each of the 9 pixels it
				 * covers: slot 0 for the upper-left pixel, slot 1 for the
				 * upper-left-but-one pixel, and so on. */
				int index = 0;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						int tmp_col = col + j;
						int tmp_row = row + i;
						filtered[tmp_col + tmp_row * cols].avgerages[index] = average;
						filtered[tmp_col + tmp_row * cols].dispersions[index] = dispersion;
						index++;
					}
				}
			}
		}
	}
}

/* For each pixel, pick the average of the covering mask with minimum
 * dispersion and write it to the output image. */
template<int BLOCK_SIZE>
__global__ void getArrayMin(unsigned char * output_img, Pair * input_img, int rows, int cols) {
	/* Calculate the index of the 2d array */
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;

	// BUGFIX: the grid is sized with ceil(), so edge blocks contain threads
	// outside the image; without this guard they read and write out of bounds
	// whenever rows/cols is not a multiple of BLOCK_SIZE.
	if (row >= rows || col >= cols)
		return;

	float min = FLT_MAX;
	int min_index = 0;
	float *dispersions = input_img[col + row * cols].dispersions;
	// NOTE(review): slots never written by rotatingMaskCUDA (pixels near the
	// image border are covered by fewer than 9 masks) are uninitialized device
	// memory; the `tmp >= 0` test only filters them when that memory happens
	// to read as negative/NaN — confirm border handling against requirements.
	for (int i = 0; i < 9; i++) {
		float tmp = dispersions[i];
		if (tmp < min && tmp >= 0) {
			min = tmp;
			min_index = i;
		}
	}
	output_img[col + row * cols] = input_img[col + row * cols].avgerages[min_index];
}

/* Host wrapper: filters one 8-bit channel of size rows x cols on the GPU and
 * returns a newly malloc'd host buffer with the result (caller frees). */
unsigned char * init(unsigned char * img, int rows, int cols) {
	// Device input image and filtered image
	unsigned char *d_img, *filtered_img, *d_filtered;
	filtered_img = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);

	// The temporary matrix holding the averages
	// and dispersions for all 9 mask positions
	Pair * d_tmp;

	// Allocate and copy input image to device
	int size = rows * cols * sizeof(unsigned char);
	cudaMalloc((void**) &d_img, size);
	cudaMemcpy(d_img, img, size, cudaMemcpyHostToDevice);

	// Allocate memory for output image
	cudaMalloc((void**) &d_filtered, size);

	// Allocate memory for tmp matrix
	int size_pair = rows * cols * sizeof(Pair);
	cudaMalloc((void**) &d_tmp, size_pair);

	// Define grid and block dimensions; tiles overlap by 2, hence BLOCK_DIM-2
	dim3 block(BLOCK_DIM, BLOCK_DIM, 1);
	dim3 grid((int) ceil((cols * 1.0) / (BLOCK_DIM - 2)),
			(int) ceil((rows * 1.0) / (BLOCK_DIM - 2)), 1);

	// Kernel invocations
	rotatingMaskCUDA<BLOCK_DIM> <<<grid, block>>>(d_tmp, d_img, rows, cols);
	dim3 grid2((int) ceil((cols * 1.0) / BLOCK_DIM), (int) ceil((rows * 1.0) / BLOCK_DIM), 1);
	getArrayMin<BLOCK_DIM> <<<grid2, block>>>(d_filtered, d_tmp, rows, cols);

	// Copy the filtered image to the host memory
	cudaMemcpy(filtered_img, d_filtered, size, cudaMemcpyDeviceToHost);

	// Free allocated memory
	cudaFree(d_img);
	cudaFree(d_tmp);
	cudaFree(d_filtered);

	return filtered_img;
}

/* Verify a CUDA-capable device is present and usable; prints device info or an
 * error and exits if the device is in prohibited compute mode. */
void checkCuda(int argc, char **argv) {
	printf("[Rotating mask technique for image filtering Using CUDA] - Starting...\n");

	// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
	int devID = 0;
	if (checkCmdLineFlag(argc, (const char **) argv, "device")) {
		devID = getCmdLineArgumentInt(argc, (const char **) argv, "device");
		cudaSetDevice(devID);
	}

	cudaError_t error;
	cudaDeviceProp deviceProp;
	error = cudaGetDevice(&devID);
	if (error != cudaSuccess) {
		printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
	}

	error = cudaGetDeviceProperties(&deviceProp, devID);
	if (deviceProp.computeMode == cudaComputeModeProhibited) {
		fprintf(stderr,
				"Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
		exit(EXIT_SUCCESS);
	}
	if (error != cudaSuccess) {
		printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
	} else {
		printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
				devID, deviceProp.name, deviceProp.major, deviceProp.minor);
	}
}

/* Entry point: load a BMP, filter each of the R/G/B/A channels independently
 * on the GPU, and write the result back out. */
int main(int argc, char **argv) {
	/* Check if CUDA is available */
	checkCuda(argc, argv);

	BMP imgIn, imgOut;
	imgIn.ReadFromFile("../../test images/lena_noise.bmp");
	int width = imgIn.TellWidth();
	int height = imgIn.TellHeight();

	unsigned char *pixelsIn_r, *pixelsIn_g, *pixelsIn_b, *pixelsIn_a;
	unsigned char *pixelsOut_r, *pixelsOut_g, *pixelsOut_b, *pixelsOut_a;

	// read the 4 channels R, G, B and A from the BMP object
	pixelsIn_r = imgIn.getPixelArray(Red);
	pixelsIn_g = imgIn.getPixelArray(Green);
	pixelsIn_b = imgIn.getPixelArray(Blue);
	pixelsIn_a = imgIn.getPixelArray(Alpha);

	// (optional cudaEvent-based timing around the four init() calls was removed;
	// re-add cudaEventRecord/cudaEventElapsedTime here if measurements are needed)

	// compute the corresponding 4 channels after performing filtering
	pixelsOut_r = init(pixelsIn_r, height, width);
	pixelsOut_g = init(pixelsIn_g, height, width);
	pixelsOut_b = init(pixelsIn_b, height, width);
	pixelsOut_a = init(pixelsIn_a, height, width);

	// write the computed channels to a bmp image file
	imgOut.fromPixelArrays(pixelsOut_r, pixelsOut_g, pixelsOut_b, pixelsOut_a, width, height);
	imgOut.WriteToFile("../../output images/lena_noise_filtered.bmp");
	return 0;
}
f1bf710312d9bff5190be95adcaf91dadeead757.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-matrix multiplication: Y = A * X.
 * Host code.
 * Author: Naga Kandasamy
 * Date: 2/21/2017
 */

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>

#include "vec_mat_mult_kernel.cu"

#define MIN_NUMBER 1
#define MAX_NUMBER 4

extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);

Matrix allocate_matrix_on_gpu(const Matrix);
Matrix allocate_matrix(int, int, int);
void copy_matrix_to_device(Matrix, const Matrix);
void copy_matrix_from_device(Matrix, const Matrix);
void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix);
void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix);
void print_matrix(const Matrix);
float get_random_number(int, int);
int checkResults(float *, float *, int, float);

/* Driver: builds random A (N x N) and X (N x 1), computes Y = A * X on the
 * CPU, then on the GPU via the global-memory and shared-memory paths, and
 * compares each GPU result against the CPU reference. */
int main(int argc, char** argv) {
	// Matrices for the program
	Matrix A; // N x N matrix
	Matrix X; // N x 1 vector
	Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector

	// Initialize the random number generator with a seed value
	srand(time(NULL));

	// Check command line arguments
	if(argc > 1) {
		printf("Error. This program accepts no arguments. \n");
		exit(0);
	}

	// Allocate and initialize the matrices
	A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix
	X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector
	Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
	Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0);
	Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0);
	printf("Matrix Size : %d x %d. \n", MATRIX_SIZE, MATRIX_SIZE);

	// compute the vector-matrix multiplication on the CPU for comparison
	struct timeval start, stop;
	gettimeofday(&start, NULL);
	compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
	gettimeofday(&stop, NULL);
	float golden_time = (float) (stop.tv_sec - start.tv_sec
			+ (stop.tv_usec - start.tv_usec) / (float) 1000000);
	printf("CPU Execution time = %fs. \n", golden_time);

	// Perform the vector-matrix multiplication on the GPU using global memory
	// Return the results in Y_gpu_1
	gettimeofday(&start, NULL);
	vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1);
	gettimeofday(&stop, NULL);
	float global_time = (float) (stop.tv_sec - start.tv_sec
			+ (stop.tv_usec - start.tv_usec) / (float) 1000000);
	printf("GPU (global) Execution time = %fs. \n", global_time);
	printf("GPU (global) Speed up = %fs. \n", global_time / golden_time);

	// check if the device result is equivalent to the expected solution
	printf("Checking against reference result. \n");
	int size_elements = NUM_ROWS;
	int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001);
	printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

	// Perform the vector-matrix multiplication on the GPU using shared memory
	// Return the results in Y_gpu_2
	gettimeofday(&start, NULL);
	vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2);
	gettimeofday(&stop, NULL);
	float shared_mem_time = (float) (stop.tv_sec - start.tv_sec
			+ (stop.tv_usec - start.tv_usec) / (float) 1000000);
	printf("GPU (shared) Execution time = %fs. \n", shared_mem_time);
	printf("GPU (shared) Speed up = %fs. \n", shared_mem_time / golden_time);

	// check if the device result is equivalent to the expected solution
	printf("Checking against reference result. \n");
	res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001);
	printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");

	// Free host matrices
	free(A.elements); A.elements = NULL;
	free(X.elements); X.elements = NULL;
	free(Y_cpu.elements); Y_cpu.elements = NULL;
	free(Y_gpu_1.elements); Y_gpu_1.elements = NULL;
	free(Y_gpu_2.elements); Y_gpu_2.elements = NULL;

	return 0;
}

/* Vector-matrix multiplication on the GPU using global memory only.
 * TODO(review): this function is an empty stub — its entire body (allocation,
 * copies, and the vec_mat_kernel_naive launch) is commented out, so Y keeps
 * its zero initialization and the "GPU (global)" check in main() always
 * reports FAILED. The sketched code below also contains typos (Martix,
 * A_dev/Yd naming) and an invalid MATRIX_SIZE x MATRIX_SIZE block shape;
 * it must be completed against the naive kernel's expected launch geometry. */
void vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y) {
	// Matrix A_d = allocate_matrix_on_gpu(A);
	// Matrix X_d = allocate_matrix_on_gpu(X);
	// Matrix Y_d = allocate_matrix_on_gpu(Y);
	// copy_matrix_to_device(A_d, A);
	// copy_matrix_to_device(X_d, X);
	// ... configure grid/block for vec_mat_kernel_naive ...
	// hipLaunchKernelGGL((vec_mat_kernel_naive), dimGrid, dimBlock, 0, 0,
	//                    A_d.elements, X_d.elements, Y_d.elements);
	// hipDeviceSynchronize();
	// copy_matrix_from_device(Y, Y_d);
	// hipFree(A_d.elements); hipFree(X_d.elements); hipFree(Y_d.elements);
}

/* Vector-matrix multiplication on the GPU using the shared-memory kernel.
 * Launch geometry: 16x16 thread blocks, one block row per 16 output rows. */
void vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y) {
	// Load M and N to the device
	Matrix Ad = allocate_matrix_on_gpu(A);
	copy_matrix_to_device(Ad, A);
	Matrix Xd = allocate_matrix_on_gpu(X);
	copy_matrix_to_device(Xd, X);

	// Allocate P on the device
	Matrix Yd = allocate_matrix_on_gpu(Y);
	copy_matrix_to_device(Yd, Y); // Clear memory

	// Setup the execution configuration
	dim3 dimBlock(16, 16, 1);
	int num_rows = ceil((float)A.num_rows / 16);
	dim3 dimGrid(1, num_rows);

	printf("Running the functionality of vector-matrix multiplication using the shared memory\n");
	struct timeval start, stop;
	gettimeofday(&start, NULL);

	// Launch the device computation threads!
	hipLaunchKernelGGL((vec_mat_kernel_optimized), dim3(dimGrid), dim3(dimBlock), 0, 0,
			Ad.elements, Xd.elements, Yd.elements);
	hipDeviceSynchronize();
	gettimeofday(&stop, NULL);
	printf("Execution time = %fs. \n", (float)(stop.tv_sec - start.tv_sec
			+ (stop.tv_usec - start.tv_usec) / (float)1000000));

	// Read P from the device
	copy_matrix_from_device(Y, Yd);

	// Free device matrices
	hipFree(Ad.elements);
	hipFree(Xd.elements);
	hipFree(Yd.elements);
}

// Allocate a device matrix of same size as M (elements pointer is replaced;
// contents are NOT copied — pair with copy_matrix_to_device).
Matrix allocate_matrix_on_gpu(const Matrix M) {
	Matrix Mdevice = M;
	int size = M.num_rows * M.num_columns * sizeof(float);
	hipMalloc((void**)&Mdevice.elements, size);
	return Mdevice;
}

// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization in [MIN_NUMBER, MAX_NUMBER].
Matrix allocate_matrix(int num_rows, int num_columns, int init) {
	Matrix M;
	M.num_columns = M.pitch = num_columns;
	M.num_rows = num_rows;
	// BUGFIX: use an unsigned count so the loop comparison against the
	// unsigned index is well-typed (was `int size` vs `unsigned int i`).
	unsigned int size = (unsigned int)M.num_rows * (unsigned int)M.num_columns;
	M.elements = (float*) malloc(size * sizeof(float));
	for(unsigned int i = 0; i < size; i++) {
		if(init == 0)
			M.elements[i] = 0;
		else
			M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
	}
	return M;
}

// Copy a host matrix to a device matrix.
// NOTE: Mdevice is passed by value, so the field assignments below only
// affect the local copy; the memcpy of the elements is the effective work.
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) {
	int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
	Mdevice.num_rows = Mhost.num_rows;
	Mdevice.num_columns = Mhost.num_columns;
	Mdevice.pitch = Mhost.pitch;
	hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}

// Copy a device matrix to a host matrix.
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) {
	int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
	hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}

// Prints the matrix out to screen
void print_matrix(const Matrix M) {
	for(unsigned int i = 0; i < M.num_rows; i++) {
		for(unsigned int j = 0; j < M.num_columns; j++)
			printf("%f ", M.elements[i * M.num_columns + j]);
		printf("\n");
	}
	printf("\n");
}

// Returns a random floating-point number between the specified min and max values
float get_random_number(int min, int max) {
	return (float)floor((double)(min + (max - min + 1) * ((float)rand() / (float)RAND_MAX)));
}

/* Compare gpu_result against reference element-wise using relative error;
 * returns 1 (pass) if every element is within `threshold`, else 0.
 * Also prints the maximum observed relative error. */
int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) {
	int checkMark = 1;
	float epsilon = 0.0;

	for(int i = 0; i < num_elements; i++) {
		// BUGFIX: guard against division by zero — fall back to absolute
		// error when the reference value is exactly zero. (With this
		// program's positive random inputs the reference is never zero, so
		// results for the existing workload are unchanged.)
		float err = (reference[i] != 0.0f)
				? fabsf((reference[i] - gpu_result[i]) / reference[i])
				: fabsf(gpu_result[i]);
		if(err > threshold) {
			checkMark = 0;
			break;
		}
	}

	for(int i = 0; i < num_elements; i++) {
		float err = (reference[i] != 0.0f)
				? fabsf((reference[i] - gpu_result[i]) / reference[i])
				: fabsf(gpu_result[i]);
		if(err > epsilon)
			epsilon = err;
	}
	printf("Max epsilon = %f. \n", epsilon);
	return checkMark;
}
f1bf710312d9bff5190be95adcaf91dadeead757.cu
/* Vector-matrix multiplication: Y = A * X. * Host code. * Author: Naga Kandasamy * Date: 2/21/2017 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <string.h> #include <math.h> #include <sys/time.h> #include "vec_mat_mult_kernel.cu" #define MIN_NUMBER 1 #define MAX_NUMBER 4 extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int); Matrix allocate_matrix_on_gpu(const Matrix); Matrix allocate_matrix(int, int, int); void copy_matrix_to_device(Matrix, const Matrix); void copy_matrix_from_device(Matrix, const Matrix); void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix); void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix); void print_matrix(const Matrix); float get_random_number(int, int); int checkResults(float *, float *, int, float); int main(int argc, char** argv) { // Matrices for the program Matrix A; // N x N matrix Matrix X; // N x 1 vector Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector // Initialize the random number generator with a seed value srand(time(NULL)); // Check command line arguments if(argc > 1){ printf("Error. This program accepts no arguments. \n"); exit(0); } // Allocate and initialize the matrices A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0); Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0); printf("Matrix Size : %d x %d. 
\n", MATRIX_SIZE, MATRIX_SIZE); // compute the vector-matrix multiplication on the CPU for comparison struct timeval start, stop; gettimeofday(&start, NULL); compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns); gettimeofday(&stop, NULL); float golden_time = (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000); printf("CPU Execution time = %fs. \n", golden_time); // Perform the vector-matrix multiplication on the GPU using global memory // Return the results in Y_gpu_1 gettimeofday(&start, NULL); vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1); gettimeofday(&stop, NULL); float global_time = (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000); printf("GPU (global) Execution time = %fs. \n", global_time); printf("GPU (global) Speed up = %fs. \n", global_time/golden_time); // check if the device result is equivalent to the expected solution printf("Checking against reference result. \n"); int size_elements = NUM_ROWS; int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // Perform the vector-matrix multiplication on the GPU using shared memory // Return the results in Y_gpu_2 gettimeofday(&start, NULL); vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2); gettimeofday(&stop, NULL); float shared_mem_time = (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000); printf("GPU (shared) Execution time = %fs. \n", shared_mem_time); printf("GPU (shared) Speed up = %fs. \n", shared_mem_time/golden_time); // check if the device result is equivalent to the expected solution printf("Checking against reference result. \n"); res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001); printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); // Free host matrices free(A.elements); A.elements = NULL; free(X.elements); X.elements = NULL; free(Y_cpu.elements); Y_cpu.elements = NULL; free(Y_gpu_1.elements); Y_gpu_1.elements = NULL; free(Y_gpu_2.elements); Y_gpu_2.elements = NULL; return 0; } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use global memory void vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y) { // Martix A_d = allocate_matrix_on_gpu(A); // Matrix X_d = allocate_matrix_on_gpu(X); // Matrix Y_d = allocate_matrix_on_gpu(Y); // // copy_matrix_to_device(A_d,A); // copy_matrix_to_device(X_d,X); // // // dim3 dimBlock(MATRIX_SIZE,MATRIX_SIZE,1); // dim3 dimGrid(A_d.width/dimBlock.x, X_d.height/dimBlock.y,1); // // printf("Setting up a %d x %d grid of thread blocks. \n", dimGrid.x, dimGrid.y); // // struct timeval start, stop; // gettimeofday(&start, NULL); // vec_mat_kernel_naive<<<dimGrid, dimBlock>>>(A_d.elements,X_d.elements,Y_d.elements); // // cudaThreadSynchronize(); // gettimeofday(&stop, NULL); // printf("Execution time = %fs. 
\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000)); // // // Read P from the device // copy_matrix_from_device(Y, Yd); // cudaFree(A_dev.elements); // cudaFree(X_dev.elements); // cudaFree(Y_dev.elements); } // Complete the functionality of vector-matrix multiplication using the GPU // Kernel should use shared memory void vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y) { // Load M and N to the device Matrix Ad = allocate_matrix_on_gpu(A); copy_matrix_to_device(Ad, A); Matrix Xd = allocate_matrix_on_gpu(X); copy_matrix_to_device(Xd, X); // Allocate P on the device Matrix Yd = allocate_matrix_on_gpu(Y); copy_matrix_to_device(Yd, Y); // Clear memory // Setup the execution configuration dim3 dimBlock(16, 16, 1); int num_rows = ceil((float)A.num_rows / 16); dim3 dimGrid(1, num_rows); printf("Running the functionality of vector-matrix multiplication using the shared memory\n"); struct timeval start, stop; gettimeofday(&start, NULL); // Launch the device computation threads! vec_mat_kernel_optimized<<<dimGrid,dimBlock>>>(Ad.elements,Xd.elements,Yd.elements); cudaThreadSynchronize(); gettimeofday(&stop, NULL); printf("Execution time = %fs. \n", (float)(stop.tv_sec-start.tv_sec+(stop.tv_usec - start.tv_usec)/(float)1000000)); // Read P from the device copy_matrix_from_device(Y, Yd); // Free device matrices cudaFree(Ad.elements); cudaFree(Xd.elements); cudaFree(Yd.elements); } // Allocate a device matrix of same size as M. Matrix allocate_matrix_on_gpu(const Matrix M) { Matrix Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. 
Matrix allocate_matrix(int num_rows, int num_columns, int init) { Matrix M; M.num_columns = M.pitch = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < size; i++){ if(init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } // Copy a host matrix to a device matrix. void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Prints the matrix out to screen void print_matrix(const Matrix M) { for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) printf("%f ", M.elements[i*M.num_columns + j]); printf("\n"); } printf("\n"); } // Returns a random floating-point number between the specified min and max values float get_random_number(int min, int max){ return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX))); } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
element_computedBy_oneThread.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define N 3 #define M 2 __global__ void add(int *A, int *B, int *C) { // int bid = blockIdx.x; int tid = threadIdx.x; printf("(%d)", tid); C[tid] = A[tid] + B[tid]; } int main() { // int *a, *b, *c; int a[M * N], b[M * N], c[M * N]; // host copies of variables a, b & c int *d_a, *d_b, *d_c; // device copies of variables a, b & c int size = sizeof(int) * M * N; // a = (int *)malloc(sizeof(int) * N); // b = (int *)malloc(sizeof(int) * N); // c = (int *)malloc(sizeof(int) * N); // Allocate space for device copies a, b & c hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_b, size); hipMalloc((void**)&d_c, size); int t = 1; // Setup input values printf("Enter values for a: "); for(int i = 0; i < M; i++) for(int j = 0; j < N; j++) a[i * N + j] = t++; t = 1; printf("Enter values for b: "); for(int i = 0; i < M; i++) for(int j = 0; j < N; j++) b[i * N + j] = t++; // Copy inputs to device hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice); // Launch add() kernel on GPU hipLaunchKernelGGL(( add), dim3(1), dim3(M*N), 0, 0, d_a, d_b, d_c); // Copy result back to host hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost); // print result for(int i = 0; i < M; i++) { for(int j = 0; j < N; j++) { printf("%d + %d = %d\n", a[i * N + j], b[i * N + j], c[i * N + j]); } } // Cleanup hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
element_computedBy_oneThread.cu
#include <stdio.h> #include <stdlib.h> #define N 3 #define M 2 __global__ void add(int *A, int *B, int *C) { // int bid = blockIdx.x; int tid = threadIdx.x; printf("(%d)", tid); C[tid] = A[tid] + B[tid]; } int main() { // int *a, *b, *c; int a[M * N], b[M * N], c[M * N]; // host copies of variables a, b & c int *d_a, *d_b, *d_c; // device copies of variables a, b & c int size = sizeof(int) * M * N; // a = (int *)malloc(sizeof(int) * N); // b = (int *)malloc(sizeof(int) * N); // c = (int *)malloc(sizeof(int) * N); // Allocate space for device copies a, b & c cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, size); int t = 1; // Setup input values printf("Enter values for a: "); for(int i = 0; i < M; i++) for(int j = 0; j < N; j++) a[i * N + j] = t++; t = 1; printf("Enter values for b: "); for(int i = 0; i < M; i++) for(int j = 0; j < N; j++) b[i * N + j] = t++; // Copy inputs to device cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice); // Launch add() kernel on GPU add<<<1, M*N>>>(d_a, d_b, d_c); // Copy result back to host cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost); // print result for(int i = 0; i < M; i++) { for(int j = 0; j < N; j++) { printf("%d + %d = %d\n", a[i * N + j], b[i * N + j], c[i * N + j]); } } // Cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }