hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
b2bf710a195358cf1c5cea234bb10ac145ac344a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Device code /* * CUDA kernel for computing the dual iteration of the fusion algorithm * Updates the values of the variables Q, P, and Err * * Author: Valsamis Ntouskos * e-mail: ntouskos@diag.uniroma1.it * ALCOR Lab, DIAG, Sapienza University of Rome */ #include <stdint.h> #include "decl.h" template <bool TTGV, bool TADAPTIVE, bool TROF, typename TYPE> __global__ void TVQ2opt(TYPE* Q, TYPE* P, TYPE* Err, const TYPE* Xhat, const TYPE* C, const TYPE* G, const TYPE* D, const TYPE* p_opt, const TYPE* p_wght, const TYPE p_norm, const TYPE p_huber, unsigned int M, unsigned int N, unsigned int K) { unsigned int imsize = M*N; // compute index unsigned int xl = threadIdx.x; unsigned int yl = threadIdx.y; unsigned int il = yl*(MEM_TILE_X+1) + xl; unsigned int indx = blockDim.x*blockIdx.x + xl; unsigned int indy = blockDim.y*blockIdx.y + yl; if (indx>=N || indy>=M) { return; } unsigned int tind = indy*N + indx; TYPE Diff[3] = {0,0,0}, norm_temp; TYPE tmpr, tgv_res = 0; // load image block to shared memory __shared__ TYPE Xhs[(MEM_TILE_X+1)*MEM_TILE_Y]; __shared__ TYPE Vhxs[(MEM_TILE_X+1)*MEM_TILE_Y]; __shared__ TYPE Vhys[(MEM_TILE_X+1)*MEM_TILE_Y]; Xhs[il] = Xhat[tind]; if (TTGV) { Vhys[il] = Xhat[tind+imsize]; Vhxs[il] = Xhat[tind+2*imsize]; } if (xl==(blockDim.x-1)) { Xhs[il+1] = Xhat[tind+1]; if (TTGV) { Vhys[il+1] = Xhat[tind+1+imsize]; Vhxs[il+1] = Xhat[tind+1+2*imsize]; } } if (yl==(blockDim.y-1)) { Xhs[il+MEM_TILE_X+1] = Xhat[tind+N]; if (TTGV) { Vhys[il+MEM_TILE_X+1] = Xhat[tind+N+imsize]; Vhxs[il+MEM_TILE_X+1] = Xhat[tind+N+2*imsize]; } } __syncthreads(); bool nanflag = isnan(Xhat[tind]); for(uint16_t kk = 0; kk < K; kk++) { unsigned int gm_ind = kk*imsize+tind; if (nanflag||isnan(D[gm_ind])) { Err[gm_ind] = 0; } else { Err[gm_ind] = Xhat[tind]-D[gm_ind]; } } // update P if (!TROF) { for(uint16_t kk = 0; kk < K; kk++) { unsigned int gm_ind = kk*imsize+tind; unsigned int c_ind; 
if (TADAPTIVE) { c_ind = K*imsize+tind; } else { c_ind = gm_ind; } tmpr = (P[gm_ind] + p_opt[2]*p_wght[0]*C[c_ind]*Err[gm_ind])/ (1+p_huber*p_opt[2]*p_wght[0]*C[c_ind]); if (tmpr>1 || tmpr<-1) { P[gm_ind] = copysignf(1.0f,tmpr); } else { P[gm_ind] = tmpr; } } } // end update P // compute gradient of Xhat if (indy<(M-1)) { Diff[0] = Xhs[il+MEM_TILE_X+1]-Xhs[il]; } if (indx<(N-1)) { Diff[1] = Xhs[il+1]-Xhs[il]; } #pragma unroll for(uint8_t ii=0; ii < 2; ii++) { if (isnan(Diff[ii])) { Diff[ii] = 0; } } // end grad(Xhat) // update Q #pragma unroll for(uint8_t ii=0; ii < 2; ii++) { if (TTGV) { tgv_res = Xhat[tind+(ii+1)*imsize]; } Q[tind+ii*imsize] += p_opt[1]*p_wght[1]*G[tind]*(Diff[ii]-tgv_res); } if (isinf(p_norm)) { norm_temp = max(abs(Q[tind]),abs(Q[tind+imsize])); } else { norm_temp = pow((pow(abs(Q[tind]),p_norm)+ pow(abs(Q[tind+imsize]),p_norm)),1/p_norm); } if (norm_temp>1) { Q[tind] /= norm_temp; Q[tind+imsize] /=norm_temp; } // end update Q if (TTGV) { // update R #pragma unroll for(uint8_t kk=0; kk < 2; kk++) { Diff[kk] = 0; } if (indy<(M-1)) { Diff[0] = Vhys[il+MEM_TILE_X+1]-Vhys[il]; Diff[1] = Vhxs[il+MEM_TILE_X+1]-Vhxs[il]; } if (indx<(N-1)) { Diff[1] += Vhys[il+1]-Vhys[il]; Diff[2] = Vhxs[il+1]-Vhxs[il]; } #pragma unroll for(uint8_t ii=0; ii < 3; ii++) { Q[tind+(ii+2)*imsize] += p_opt[1]*p_wght[2]*Diff[ii]; } if (isinf(p_norm)) { norm_temp = max(max(Q[tind+2*imsize],Q[tind+3*imsize]),Q[tind+4*imsize]); } else { norm_temp = pow((pow(abs(Q[tind+2*imsize]),p_norm)+ pow(abs(Q[tind+3*imsize]),p_norm)+ pow(abs(Q[tind+4*imsize]),p_norm)),1/p_norm); } if (norm_temp>1) { #pragma unroll for(int ii=0; ii < 3; ii++) { Q[tind+(ii+2)*imsize] /= norm_temp; } } // end update R } } template __global__ void TVQ2opt<false,false,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); 
template __global__ void TVQ2opt<false,true,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,false,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int 
K); template __global__ void TVQ2opt<false,false,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,false,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* 
p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K);
b2bf710a195358cf1c5cea234bb10ac145ac344a.cu
// Device code /* * CUDA kernel for computing the dual iteration of the fusion algorithm * Updates the values of the variables Q, P, and Err * * Author: Valsamis Ntouskos * e-mail: ntouskos@diag.uniroma1.it * ALCOR Lab, DIAG, Sapienza University of Rome */ #include <stdint.h> #include "decl.h" template <bool TTGV, bool TADAPTIVE, bool TROF, typename TYPE> __global__ void TVQ2opt(TYPE* Q, TYPE* P, TYPE* Err, const TYPE* Xhat, const TYPE* C, const TYPE* G, const TYPE* D, const TYPE* p_opt, const TYPE* p_wght, const TYPE p_norm, const TYPE p_huber, unsigned int M, unsigned int N, unsigned int K) { unsigned int imsize = M*N; // compute index unsigned int xl = threadIdx.x; unsigned int yl = threadIdx.y; unsigned int il = yl*(MEM_TILE_X+1) + xl; unsigned int indx = blockDim.x*blockIdx.x + xl; unsigned int indy = blockDim.y*blockIdx.y + yl; if (indx>=N || indy>=M) { return; } unsigned int tind = indy*N + indx; TYPE Diff[3] = {0,0,0}, norm_temp; TYPE tmpr, tgv_res = 0; // load image block to shared memory __shared__ TYPE Xhs[(MEM_TILE_X+1)*MEM_TILE_Y]; __shared__ TYPE Vhxs[(MEM_TILE_X+1)*MEM_TILE_Y]; __shared__ TYPE Vhys[(MEM_TILE_X+1)*MEM_TILE_Y]; Xhs[il] = Xhat[tind]; if (TTGV) { Vhys[il] = Xhat[tind+imsize]; Vhxs[il] = Xhat[tind+2*imsize]; } if (xl==(blockDim.x-1)) { Xhs[il+1] = Xhat[tind+1]; if (TTGV) { Vhys[il+1] = Xhat[tind+1+imsize]; Vhxs[il+1] = Xhat[tind+1+2*imsize]; } } if (yl==(blockDim.y-1)) { Xhs[il+MEM_TILE_X+1] = Xhat[tind+N]; if (TTGV) { Vhys[il+MEM_TILE_X+1] = Xhat[tind+N+imsize]; Vhxs[il+MEM_TILE_X+1] = Xhat[tind+N+2*imsize]; } } __syncthreads(); bool nanflag = isnan(Xhat[tind]); for(uint16_t kk = 0; kk < K; kk++) { unsigned int gm_ind = kk*imsize+tind; if (nanflag||isnan(D[gm_ind])) { Err[gm_ind] = 0; } else { Err[gm_ind] = Xhat[tind]-D[gm_ind]; } } // update P if (!TROF) { for(uint16_t kk = 0; kk < K; kk++) { unsigned int gm_ind = kk*imsize+tind; unsigned int c_ind; if (TADAPTIVE) { c_ind = K*imsize+tind; } else { c_ind = gm_ind; } tmpr = (P[gm_ind] + 
p_opt[2]*p_wght[0]*C[c_ind]*Err[gm_ind])/ (1+p_huber*p_opt[2]*p_wght[0]*C[c_ind]); if (tmpr>1 || tmpr<-1) { P[gm_ind] = copysignf(1.0f,tmpr); } else { P[gm_ind] = tmpr; } } } // end update P // compute gradient of Xhat if (indy<(M-1)) { Diff[0] = Xhs[il+MEM_TILE_X+1]-Xhs[il]; } if (indx<(N-1)) { Diff[1] = Xhs[il+1]-Xhs[il]; } #pragma unroll for(uint8_t ii=0; ii < 2; ii++) { if (isnan(Diff[ii])) { Diff[ii] = 0; } } // end grad(Xhat) // update Q #pragma unroll for(uint8_t ii=0; ii < 2; ii++) { if (TTGV) { tgv_res = Xhat[tind+(ii+1)*imsize]; } Q[tind+ii*imsize] += p_opt[1]*p_wght[1]*G[tind]*(Diff[ii]-tgv_res); } if (isinf(p_norm)) { norm_temp = max(abs(Q[tind]),abs(Q[tind+imsize])); } else { norm_temp = pow((pow(abs(Q[tind]),p_norm)+ pow(abs(Q[tind+imsize]),p_norm)),1/p_norm); } if (norm_temp>1) { Q[tind] /= norm_temp; Q[tind+imsize] /=norm_temp; } // end update Q if (TTGV) { // update R #pragma unroll for(uint8_t kk=0; kk < 2; kk++) { Diff[kk] = 0; } if (indy<(M-1)) { Diff[0] = Vhys[il+MEM_TILE_X+1]-Vhys[il]; Diff[1] = Vhxs[il+MEM_TILE_X+1]-Vhxs[il]; } if (indx<(N-1)) { Diff[1] += Vhys[il+1]-Vhys[il]; Diff[2] = Vhxs[il+1]-Vhxs[il]; } #pragma unroll for(uint8_t ii=0; ii < 3; ii++) { Q[tind+(ii+2)*imsize] += p_opt[1]*p_wght[2]*Diff[ii]; } if (isinf(p_norm)) { norm_temp = max(max(Q[tind+2*imsize],Q[tind+3*imsize]),Q[tind+4*imsize]); } else { norm_temp = pow((pow(abs(Q[tind+2*imsize]),p_norm)+ pow(abs(Q[tind+3*imsize]),p_norm)+ pow(abs(Q[tind+4*imsize]),p_norm)),1/p_norm); } if (norm_temp>1) { #pragma unroll for(int ii=0; ii < 3; ii++) { Q[tind+(ii+2)*imsize] /= norm_temp; } } // end update R } } template __global__ void TVQ2opt<false,false,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,false,float>(float* Q, float* P, float* Err, 
const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,false,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,false,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,true,float>(float* Q, float* P, float* Err, const float* Xhat, const float* C, const float* G, const float* D, const float* p_opt, const float* p_wght, const float p_norm, const float p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,false,false,double>(double* Q, double* P, 
double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,true,false,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,false,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<false,true,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K); template __global__ void TVQ2opt<true,false,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, 
unsigned int K); template __global__ void TVQ2opt<true,true,true,double>(double* Q, double* P, double* Err, const double* Xhat, const double* C, const double* G, const double* D, const double* p_opt, const double* p_wght, const double p_norm, const double p_huber, unsigned int M, unsigned int N, unsigned int K);
e5ed4c4293c8b808c8fbfa7fd88f10cb35e3da72.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/BinaryOps.h> #include <ATen/native/TensorIterator.h> #include <c10/util/TypeSafeSignMath.h> #include <type_traits> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void remainder_kernel_cuda(TensorIteratorBase& iter) { if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t r = a % b; if (r != 0 && c10::signs_differ(r, b)) { r += b; } return r; }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { auto mod = ::fmod(a, b); if (mod != 0 && c10::signs_differ(b, mod)) { mod += b; } return mod; }); }); } } void fmod_kernel_cuda(TensorIteratorBase& iter) { if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a % b; }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(), "fmod_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { return ::fmod(a, b); }); }); } } REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda); REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda); }} // namespace at::native
e5ed4c4293c8b808c8fbfa7fd88f10cb35e3da72.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/BinaryOps.h> #include <ATen/native/TensorIterator.h> #include <c10/util/TypeSafeSignMath.h> #include <type_traits> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void remainder_kernel_cuda(TensorIteratorBase& iter) { if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "remainder_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t r = a % b; if (r != 0 && c10::signs_differ(r, b)) { r += b; } return r; }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "remainder_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { auto mod = ::fmod(a, b); if (mod != 0 && c10::signs_differ(b, mod)) { mod += b; } return mod; }); }); } } void fmod_kernel_cuda(TensorIteratorBase& iter) { if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "fmod_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a % b; }); }); } else { AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(), "fmod_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { return ::fmod(a, b); }); }); } } REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda); REGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda); }} // namespace at::native
c469e2fff5830c61bc46c7bd6c4d38ad709dda6e.hip
// !!! This is a file automatically generated by hipify!!! // Puts everything together // For now, just run V times. // Optimizations: // -come up with good stopping criteria [done] // -start from i=1 [done] // -test whether float really are faster than ints // -distributed idea // -change nthread [done - doesn't work] #include <cstdlib> #include <stdio.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hipsparse.h> #include <moderngpu.cuh> #include <util.cuh> #include <sssp.cuh> #include <string.h> #include <testBfs.cpp> #include <testSssp.cpp> void runSssp(int argc, char**argv) { int m, n, edge; mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // Define what filetype edge value should be stored typedef float Value; // File i/o // 1. Open file from command-line // -source 1 freopen(argv[1],"r",stdin); int source; int device; float delta; bool undirected = false; if( parseArgs( argc, argv, source, device, delta, undirected )==true ) { printf( "Usage: test apple.mtx -source 5\n"); return; } //hipSetDevice(device); printf("Testing %s from source %d\n", argv[1], source); // 2. Reads in number of edges, number of nodes readEdge( m, n, edge, stdin ); printf("Graph has %d nodes, %d edges\n", m, edge); // 3. Allocate memory depending on how many edges are present Value *h_csrValA; int *h_csrRowPtrA, *h_csrColIndA, *h_cooRowIndA; float *h_ssspResult, *h_ssspResultCPU; h_csrValA = (Value*)malloc(edge*sizeof(Value)); h_csrRowPtrA = (int*)malloc((m+1)*sizeof(int)); h_csrColIndA = (int*)malloc(edge*sizeof(int)); h_cooRowIndA = (int*)malloc(edge*sizeof(int)); h_ssspResult = (float*)malloc((m)*sizeof(float)); h_ssspResultCPU = (float*)malloc((m)*sizeof(float)); // 4. Read in graph from .mtx file readMtx<Value>( edge, h_csrColIndA, h_cooRowIndA, h_csrValA ); print_array( h_cooRowIndA, m ); // 5. 
Allocate GPU memory Value *d_csrValA; int *d_csrRowPtrA, *d_csrColIndA, *d_cooRowIndA; Value *d_cscValA; int *d_cscRowIndA, *d_cscColPtrA; float *d_ssspResult; hipMalloc(&d_ssspResult, m*sizeof(float)); hipMalloc(&d_csrValA, edge*sizeof(Value)); hipMalloc(&d_csrRowPtrA, (m+1)*sizeof(int)); hipMalloc(&d_csrColIndA, edge*sizeof(int)); hipMalloc(&d_cooRowIndA, edge*sizeof(int)); hipMalloc(&d_cscValA, edge*sizeof(Value)); hipMalloc(&d_cscRowIndA, edge*sizeof(int)); hipMalloc(&d_cscColPtrA, (m+1)*sizeof(int)); // 6. Copy data from host to device hipMemcpy(d_csrValA, h_csrValA, (edge)*sizeof(Value),hipMemcpyHostToDevice); hipMemcpy(d_csrColIndA, h_csrColIndA, (edge)*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_cooRowIndA, h_cooRowIndA, (edge)*sizeof(int),hipMemcpyHostToDevice); // 7. Run COO -> CSR kernel coo2csr( d_cooRowIndA, edge, m, d_csrRowPtrA ); // 8. Run SSSP on CPU. Need data in CSR form first. hipMemcpy(h_csrRowPtrA,d_csrRowPtrA,(m+1)*sizeof(int),hipMemcpyDeviceToHost); int depth = 1000; ssspCPU( source, m, h_csrRowPtrA, h_csrColIndA, h_csrValA, h_ssspResultCPU, depth ); print_end_interesting(h_ssspResultCPU, m); // Verify SSSP CPU with BFS CPU. depth = bfsCPU<float>( source, m, h_csrRowPtrA, h_csrColIndA, h_ssspResult, 1000); //ssspBoost( source, m, edge, h_csrRowPtrA, h_csrColIndA, h_csrValA, h_ssspResult, 1000); //verify<float>( m, h_ssspResultCPU, h_ssspResult ); // Make two GPU timers GpuTimer gpu_timer; GpuTimer gpu_timer2; float elapsed = 0.0f; float elapsed2 = 0.0f; gpu_timer.Start(); // 9. Run CSR -> CSC kernel csr2csc<Value>( m, edge, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_cscValA, d_cscRowIndA, d_cscColPtrA ); gpu_timer.Stop(); gpu_timer2.Start(); // 10. 
Run SSSP kernel on GPU //sssp<Value>( source, edge, m, d_csrValA, d_cscColPtrA, d_cscRowIndA, d_ssspResult, depth, *context ); sssp<Value>( source, edge, m, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_ssspResult, depth, *context ); gpu_timer2.Stop(); elapsed += gpu_timer.ElapsedMillis(); elapsed2 += gpu_timer2.ElapsedMillis(); printf("CSR->CSC finished in %f msec. performed %d iterations\n", elapsed, depth-1); //printf("GPU SSSP finished in %f msec. not including transpose\n", elapsed2); hipMemcpy(h_csrColIndA, d_csrColIndA, edge*sizeof(int), hipMemcpyDeviceToHost); print_array(h_csrColIndA, m); // Compare with CPU SSSP for errors hipMemcpy(h_ssspResult,d_ssspResult,m*sizeof(float),hipMemcpyDeviceToHost); verify<float>( m, h_ssspResult, h_ssspResultCPU ); print_array(h_ssspResult, m); // Compare with SpMV for errors //bfs( 0, edge, m, d_cscColPtrA, d_cscRowIndA, d_bfsResult, depth, *context); //hipMemcpy(h_bfsResult,d_bfsResult,m*sizeof(int),hipMemcpyDeviceToHost); //verify<int>( m, h_bfsResult, h_bfsResultCPU ); //print_array(h_bfsResult, m); hipFree(d_csrValA); hipFree(d_csrRowPtrA); hipFree(d_csrColIndA); hipFree(d_cooRowIndA); hipFree(d_cscValA); hipFree(d_cscRowIndA); hipFree(d_cscColPtrA); hipFree(d_ssspResult); free(h_csrValA); free(h_csrRowPtrA); free(h_csrColIndA); free(h_cooRowIndA); free(h_ssspResult); free(h_ssspResultCPU); //free(h_cscValA); //free(h_cscRowIndA); //free(h_cscColPtrA);*/ } int main(int argc, char**argv) { runSssp(argc, argv); }
c469e2fff5830c61bc46c7bd6c4d38ad709dda6e.cu
// Puts everything together // For now, just run V times. // Optimizations: // -come up with good stopping criteria [done] // -start from i=1 [done] // -test whether float really are faster than ints // -distributed idea // -change nthread [done - doesn't work] #include <cstdlib> #include <stdio.h> #include <cuda_runtime_api.h> #include <cuda.h> #include <cusparse.h> #include <moderngpu.cuh> #include <util.cuh> #include <sssp.cuh> #include <string.h> #include <testBfs.cpp> #include <testSssp.cpp> void runSssp(int argc, char**argv) { int m, n, edge; mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // Define what filetype edge value should be stored typedef float Value; // File i/o // 1. Open file from command-line // -source 1 freopen(argv[1],"r",stdin); int source; int device; float delta; bool undirected = false; if( parseArgs( argc, argv, source, device, delta, undirected )==true ) { printf( "Usage: test apple.mtx -source 5\n"); return; } //cudaSetDevice(device); printf("Testing %s from source %d\n", argv[1], source); // 2. Reads in number of edges, number of nodes readEdge( m, n, edge, stdin ); printf("Graph has %d nodes, %d edges\n", m, edge); // 3. Allocate memory depending on how many edges are present Value *h_csrValA; int *h_csrRowPtrA, *h_csrColIndA, *h_cooRowIndA; float *h_ssspResult, *h_ssspResultCPU; h_csrValA = (Value*)malloc(edge*sizeof(Value)); h_csrRowPtrA = (int*)malloc((m+1)*sizeof(int)); h_csrColIndA = (int*)malloc(edge*sizeof(int)); h_cooRowIndA = (int*)malloc(edge*sizeof(int)); h_ssspResult = (float*)malloc((m)*sizeof(float)); h_ssspResultCPU = (float*)malloc((m)*sizeof(float)); // 4. Read in graph from .mtx file readMtx<Value>( edge, h_csrColIndA, h_cooRowIndA, h_csrValA ); print_array( h_cooRowIndA, m ); // 5. 
Allocate GPU memory Value *d_csrValA; int *d_csrRowPtrA, *d_csrColIndA, *d_cooRowIndA; Value *d_cscValA; int *d_cscRowIndA, *d_cscColPtrA; float *d_ssspResult; cudaMalloc(&d_ssspResult, m*sizeof(float)); cudaMalloc(&d_csrValA, edge*sizeof(Value)); cudaMalloc(&d_csrRowPtrA, (m+1)*sizeof(int)); cudaMalloc(&d_csrColIndA, edge*sizeof(int)); cudaMalloc(&d_cooRowIndA, edge*sizeof(int)); cudaMalloc(&d_cscValA, edge*sizeof(Value)); cudaMalloc(&d_cscRowIndA, edge*sizeof(int)); cudaMalloc(&d_cscColPtrA, (m+1)*sizeof(int)); // 6. Copy data from host to device cudaMemcpy(d_csrValA, h_csrValA, (edge)*sizeof(Value),cudaMemcpyHostToDevice); cudaMemcpy(d_csrColIndA, h_csrColIndA, (edge)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_cooRowIndA, h_cooRowIndA, (edge)*sizeof(int),cudaMemcpyHostToDevice); // 7. Run COO -> CSR kernel coo2csr( d_cooRowIndA, edge, m, d_csrRowPtrA ); // 8. Run SSSP on CPU. Need data in CSR form first. cudaMemcpy(h_csrRowPtrA,d_csrRowPtrA,(m+1)*sizeof(int),cudaMemcpyDeviceToHost); int depth = 1000; ssspCPU( source, m, h_csrRowPtrA, h_csrColIndA, h_csrValA, h_ssspResultCPU, depth ); print_end_interesting(h_ssspResultCPU, m); // Verify SSSP CPU with BFS CPU. depth = bfsCPU<float>( source, m, h_csrRowPtrA, h_csrColIndA, h_ssspResult, 1000); //ssspBoost( source, m, edge, h_csrRowPtrA, h_csrColIndA, h_csrValA, h_ssspResult, 1000); //verify<float>( m, h_ssspResultCPU, h_ssspResult ); // Make two GPU timers GpuTimer gpu_timer; GpuTimer gpu_timer2; float elapsed = 0.0f; float elapsed2 = 0.0f; gpu_timer.Start(); // 9. Run CSR -> CSC kernel csr2csc<Value>( m, edge, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_cscValA, d_cscRowIndA, d_cscColPtrA ); gpu_timer.Stop(); gpu_timer2.Start(); // 10. 
Run SSSP kernel on GPU //sssp<Value>( source, edge, m, d_csrValA, d_cscColPtrA, d_cscRowIndA, d_ssspResult, depth, *context ); sssp<Value>( source, edge, m, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_ssspResult, depth, *context ); gpu_timer2.Stop(); elapsed += gpu_timer.ElapsedMillis(); elapsed2 += gpu_timer2.ElapsedMillis(); printf("CSR->CSC finished in %f msec. performed %d iterations\n", elapsed, depth-1); //printf("GPU SSSP finished in %f msec. not including transpose\n", elapsed2); cudaMemcpy(h_csrColIndA, d_csrColIndA, edge*sizeof(int), cudaMemcpyDeviceToHost); print_array(h_csrColIndA, m); // Compare with CPU SSSP for errors cudaMemcpy(h_ssspResult,d_ssspResult,m*sizeof(float),cudaMemcpyDeviceToHost); verify<float>( m, h_ssspResult, h_ssspResultCPU ); print_array(h_ssspResult, m); // Compare with SpMV for errors //bfs( 0, edge, m, d_cscColPtrA, d_cscRowIndA, d_bfsResult, depth, *context); //cudaMemcpy(h_bfsResult,d_bfsResult,m*sizeof(int),cudaMemcpyDeviceToHost); //verify<int>( m, h_bfsResult, h_bfsResultCPU ); //print_array(h_bfsResult, m); cudaFree(d_csrValA); cudaFree(d_csrRowPtrA); cudaFree(d_csrColIndA); cudaFree(d_cooRowIndA); cudaFree(d_cscValA); cudaFree(d_cscRowIndA); cudaFree(d_cscColPtrA); cudaFree(d_ssspResult); free(h_csrValA); free(h_csrRowPtrA); free(h_csrColIndA); free(h_cooRowIndA); free(h_ssspResult); free(h_ssspResultCPU); //free(h_cscValA); //free(h_cscRowIndA); //free(h_cscColPtrA);*/ } int main(int argc, char**argv) { runSssp(argc, argv); }
dec84eb8fdbbd428e8ffb5e0775fcd0958f7364b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "quantize_linear.cuh" #include <limits> #include "core/providers/cuda/cu_inc/common.cuh" #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11080 #include "cuda_fp8.h" #endif namespace onnxruntime { namespace cuda { template <typename InT, typename OutT> struct RoundStd; template <typename InT, typename OutT> struct RoundSat; template <> struct RoundStd<float, int8_t> { __device__ __forceinline__ int8_t operator()(float v, float scale, int8_t zero_point) const { int value = __float2int_rn(v / scale) + zero_point; return static_cast<int8_t>(max(std::numeric_limits<int8_t>::min(), min(std::numeric_limits<int8_t>::max(), value))); } }; template <> struct RoundStd<float, uint8_t> { __device__ __forceinline__ uint8_t operator()(float v, float scale, uint8_t zero_point) const { int value = __float2int_rn(v / scale) + zero_point; return static_cast<uint8_t>(max(std::numeric_limits<uint8_t>::min(), min(std::numeric_limits<uint8_t>::max(), value))); } }; #if !defined(DISABLE_FLOAT8_TYPES) #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11080 // Conversion from float 8 to float or float16 does not need zero_point argument as defined by onnx standard. template <> struct RoundSat<float, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(float v, float scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(static_cast<unsigned char>(__nv_cvt_float_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E4M3)), Float8E4M3FN::FromBits()); } }; template <> struct RoundSat<half, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(half v, half scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(static_cast<unsigned char>(__nv_cvt_halfraw_to_fp8(v / scale, saturate ? 
__NV_SATFINITE : __NV_NOSAT, __NV_E4M3)), Float8E4M3FN::FromBits()); } }; template <> struct RoundSat<float, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(float v, float scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(static_cast<unsigned char>(__nv_cvt_float_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E5M2)), Float8E5M2::FromBits()); } }; template <> struct RoundSat<half, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(half v, half scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(static_cast<unsigned char>(__nv_cvt_halfraw_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E5M2)), Float8E5M2::FromBits()); } }; #else // Conversion from float 8 to float or float16 does not need zero_point argument as defined by onnx standard. template <> struct RoundSat<float, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(float v, float scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(v / scale, saturate); } }; template <> struct RoundSat<half, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(half v, half scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(__half2float(v / scale), true); } }; template <> struct RoundSat<float, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(float v, float scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(v / scale, saturate); } }; template <> struct RoundSat<half, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(half v, half scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(__half2float(v / scale), saturate); } }; #endif #endif template <> struct RoundStd<half, int8_t> { __device__ __forceinline__ int8_t operator()(half v, half scale, int8_t zero_point) const { int value = __half2int_rn(v / scale) + zero_point; return 
static_cast<int8_t>(max(std::numeric_limits<int8_t>::min(), min(std::numeric_limits<int8_t>::max(), value))); } }; template <> struct RoundStd<half, uint8_t> { __device__ __forceinline__ int8_t operator()(half v, half scale, uint8_t zero_point) const { int value = __half2int_rn(v / scale) + zero_point; return static_cast<uint8_t>(max(std::numeric_limits<uint8_t>::min(), min(std::numeric_limits<uint8_t>::max(), value))); } }; template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelStd(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, RoundStd<InT, OutT> round) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; InT scale = *scale_ptr; OutT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : static_cast<OutT>(0); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = round(input[id], scale, zero_point); id += NumThreadsPerBlock; } } } template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelAxisStd(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales, RoundStd<InT, OutT> round) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = round(input[id], scale_ptr[scale_id], zero_point_ptr == nullptr ? 
static_cast<OutT>(0) : zero_point_ptr[scale_id]); id += NumThreadsPerBlock; } } } #if !defined(DISABLE_FLOAT8_TYPES) template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelSat(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, RoundSat<InT, OutT> round, bool saturate) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; InT scale = *scale_ptr; OutT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : OutT(0, true); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = round(input[id], scale, zero_point, saturate); id += NumThreadsPerBlock; } } } template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelAxisSat(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales, RoundSat<InT, OutT> round, bool saturate) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = round(input[id], scale_ptr[scale_id], zero_point_ptr == nullptr ? 
OutT(0, true) : zero_point_ptr[scale_id], saturate); id += NumThreadsPerBlock; } } } #endif template <class OutT, class InT> Status CudaQuantizeLinearStd(hipStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( QuantizeLinearKernelStd<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), RoundStd<InT, OutT>()); return Status::OK(); } template <class OutT, class InT> Status CudaQuantizeLinearAxisStd(hipStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( QuantizeLinearKernelAxisStd<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales, RoundStd<InT, OutT>()); return Status::OK(); } #if !defined(DISABLE_FLOAT8_TYPES) template <class OutT, class InT> Status CudaQuantizeLinearSat(hipStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, bool saturate) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( QuantizeLinearKernelSat<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), 
dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), RoundSat<InT, OutT>(), saturate); return Status::OK(); } template <class OutT, class InT> Status CudaQuantizeLinearAxisSat(hipStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( QuantizeLinearKernelAxisSat<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales, RoundSat<InT, OutT>(), saturate); return Status::OK(); } #endif template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelStd(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; OutT scale = *scale_ptr; InT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : static_cast<InT>(0); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = static_cast<OutT>(input[id] - zero_point) * scale; id += NumThreadsPerBlock; } } } template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelAxisStd(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. 
CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = (zero_point_ptr == nullptr ? static_cast<OutT>(input[id]) : static_cast<OutT>(input[id] - zero_point_ptr[scale_id])) * scale_ptr[scale_id]; id += NumThreadsPerBlock; } } } template <typename InT, typename OutT> struct DQFloat8; #if !defined(DISABLE_FLOAT8_TYPES) #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11080 template <> struct DQFloat8<Float8E4M3FN, half> { __device__ __forceinline__ half operator()(Float8E4M3FN v, half scale) const { return __nv_cvt_fp8_to_halfraw(v.val, __NV_E4M3) * scale; } }; template <> struct DQFloat8<Float8E5M2, half> { __device__ __forceinline__ half operator()(Float8E5M2 v, half scale) const { return __nv_cvt_fp8_to_halfraw(v.val, __NV_E5M2) * scale; } }; template <> struct DQFloat8<Float8E4M3FN, float> { __device__ __forceinline__ float operator()(Float8E4M3FN v, float scale) const { return __half2float(__nv_cvt_fp8_to_halfraw(v.val, __NV_E4M3)) * scale; } }; template <> struct DQFloat8<Float8E5M2, float> { __device__ __forceinline__ float operator()(Float8E5M2 v, float scale) const { return __half2float(__nv_cvt_fp8_to_halfraw(v.val, __NV_E5M2)) * scale; } }; #else template <> struct DQFloat8<Float8E4M3FN, half> { __device__ __forceinline__ half operator()(Float8E4M3FN v, half scale) const { return __float2half(v.ToFloat()) * scale; } }; template <> struct DQFloat8<Float8E5M2, half> { __device__ __forceinline__ half operator()(Float8E5M2 v, half scale) const { return __float2half(v.ToFloat()) * scale; } }; template <> struct DQFloat8<Float8E4M3FN, float> { __device__ __forceinline__ float operator()(Float8E4M3FN v, float scale) const { return v.ToFloat() * scale; } }; template <> struct DQFloat8<Float8E5M2, float> { __device__ __forceinline__ float operator()(Float8E5M2 v, float scale) const { return v.ToFloat() * scale; } }; 
#endif #endif template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelSat(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; OutT scale = *scale_ptr; // zero_point is unused. // InT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : InT(0, true); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = DQFloat8<InT, OutT>()(input[id], scale); id += NumThreadsPerBlock; } } } #if !defined(DISABLE_FLOAT8_TYPES) template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelAxisSat(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. 
CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = DQFloat8<InT, OutT>()(input[id], scale_ptr[scale_id]); id += NumThreadsPerBlock; } } } #endif template <class InT, class OutT> Status CudaDequantizeLinearStd(hipStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( DequantizeLinearKernelStd<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element)); return Status::OK(); } template <class InT, class OutT> Status CudaDequantizeLinearAxisStd(hipStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( DequantizeLinearKernelAxisStd<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales); return Status::OK(); } #if !defined(DISABLE_FLOAT8_TYPES) template <class InT, class OutT> Status CudaDequantizeLinearSat(hipStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * 
GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( DequantizeLinearKernelSat<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element)); return Status::OK(); } template <class InT, class OutT> Status CudaDequantizeLinearAxisSat(hipStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); hipLaunchKernelGGL(( DequantizeLinearKernelAxisSat<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales); return Status::OK(); } #endif template Status CudaQuantizeLinearStd<int8_t, float>(hipStream_t stream, const float* input, int8_t* output, const float* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<uint8_t, float>(hipStream_t stream, const float* input, uint8_t* output, const float* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<int8_t, half>(hipStream_t stream, const half* input, int8_t* output, const half* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<uint8_t, half>(hipStream_t stream, const half* input, uint8_t* output, const half* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearAxisStd<int8_t, float>(hipStream_t stream, const float* input, int8_t* output, const float* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status 
CudaQuantizeLinearAxisStd<uint8_t, float>(hipStream_t stream, const float* input, uint8_t* output, const float* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaQuantizeLinearAxisStd<int8_t, half>(hipStream_t stream, const half* input, int8_t* output, const half* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaQuantizeLinearAxisStd<uint8_t, half>(hipStream_t stream, const half* input, uint8_t* output, const half* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #if !defined(DISABLE_FLOAT8_TYPES) template Status CudaQuantizeLinearSat<Float8E4M3FN, float>(hipStream_t stream, const float* input, Float8E4M3FN* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearSat<Float8E5M2, float>(hipStream_t stream, const float* input, Float8E5M2* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearSat<Float8E4M3FN, half>(hipStream_t stream, const half* input, Float8E4M3FN* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearSat<Float8E5M2, half>(hipStream_t stream, const half* input, Float8E5M2* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E4M3FN, float>(hipStream_t stream, const float* input, Float8E4M3FN* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E5M2, float>(hipStream_t stream, const float* input, Float8E5M2* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool 
saturate); template Status CudaQuantizeLinearAxisSat<Float8E4M3FN, half>(hipStream_t stream, const half* input, Float8E4M3FN* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E5M2, half>(hipStream_t stream, const half* input, Float8E5M2* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); #endif template Status CudaDequantizeLinearStd<int8_t, float>(hipStream_t stream, const int8_t* input, float* output, const float* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<uint8_t, float>(hipStream_t stream, const uint8_t* input, float* output, const float* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<int8_t, half>(hipStream_t stream, const int8_t* input, half* output, const half* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<uint8_t, half>(hipStream_t stream, const uint8_t* input, half* output, const half* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearAxisStd<int8_t, float>(hipStream_t stream, const int8_t* input, float* output, const float* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<uint8_t, float>(hipStream_t stream, const uint8_t* input, float* output, const float* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<int8_t, half>(hipStream_t stream, const int8_t* input, half* output, const half* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<uint8_t, half>(hipStream_t stream, const uint8_t* input, 
half* output, const half* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #if !defined(DISABLE_FLOAT8_TYPES) template Status CudaDequantizeLinearSat<Float8E4M3FN, float>(hipStream_t stream, const Float8E4M3FN* input, float* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E5M2, float>(hipStream_t stream, const Float8E5M2* input, float* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E4M3FN, half>(hipStream_t stream, const Float8E4M3FN* input, half* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E5M2, half>(hipStream_t stream, const Float8E5M2* input, half* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element); template Status CudaDequantizeLinearAxisSat<Float8E4M3FN, float>(hipStream_t stream, const Float8E4M3FN* input, float* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E5M2, float>(hipStream_t stream, const Float8E5M2* input, float* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E4M3FN, half>(hipStream_t stream, const Float8E4M3FN* input, half* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E5M2, half>(hipStream_t stream, const Float8E5M2* input, half* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #endif } // namespace cuda } // namespace onnxruntime
dec84eb8fdbbd428e8ffb5e0775fcd0958f7364b.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "quantize_linear.cuh" #include <limits> #include "core/providers/cuda/cu_inc/common.cuh" #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 #include "cuda_fp8.h" #endif namespace onnxruntime { namespace cuda { template <typename InT, typename OutT> struct RoundStd; template <typename InT, typename OutT> struct RoundSat; template <> struct RoundStd<float, int8_t> { __device__ __forceinline__ int8_t operator()(float v, float scale, int8_t zero_point) const { int value = __float2int_rn(v / scale) + zero_point; return static_cast<int8_t>(max(std::numeric_limits<int8_t>::min(), min(std::numeric_limits<int8_t>::max(), value))); } }; template <> struct RoundStd<float, uint8_t> { __device__ __forceinline__ uint8_t operator()(float v, float scale, uint8_t zero_point) const { int value = __float2int_rn(v / scale) + zero_point; return static_cast<uint8_t>(max(std::numeric_limits<uint8_t>::min(), min(std::numeric_limits<uint8_t>::max(), value))); } }; #if !defined(DISABLE_FLOAT8_TYPES) #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 // Conversion from float 8 to float or float16 does not need zero_point argument as defined by onnx standard. template <> struct RoundSat<float, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(float v, float scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(static_cast<unsigned char>(__nv_cvt_float_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E4M3)), Float8E4M3FN::FromBits()); } }; template <> struct RoundSat<half, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(half v, half scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(static_cast<unsigned char>(__nv_cvt_halfraw_to_fp8(v / scale, saturate ? 
__NV_SATFINITE : __NV_NOSAT, __NV_E4M3)), Float8E4M3FN::FromBits()); } }; template <> struct RoundSat<float, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(float v, float scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(static_cast<unsigned char>(__nv_cvt_float_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E5M2)), Float8E5M2::FromBits()); } }; template <> struct RoundSat<half, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(half v, half scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(static_cast<unsigned char>(__nv_cvt_halfraw_to_fp8(v / scale, saturate ? __NV_SATFINITE : __NV_NOSAT, __NV_E5M2)), Float8E5M2::FromBits()); } }; #else // Conversion from float 8 to float or float16 does not need zero_point argument as defined by onnx standard. template <> struct RoundSat<float, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(float v, float scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(v / scale, saturate); } }; template <> struct RoundSat<half, Float8E4M3FN> { __device__ __forceinline__ Float8E4M3FN operator()(half v, half scale, Float8E4M3FN /* zero_point */, bool saturate) const { return Float8E4M3FN(__half2float(v / scale), true); } }; template <> struct RoundSat<float, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(float v, float scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(v / scale, saturate); } }; template <> struct RoundSat<half, Float8E5M2> { __device__ __forceinline__ Float8E5M2 operator()(half v, half scale, Float8E5M2 /* zero_point */, bool saturate) const { return Float8E5M2(__half2float(v / scale), saturate); } }; #endif #endif template <> struct RoundStd<half, int8_t> { __device__ __forceinline__ int8_t operator()(half v, half scale, int8_t zero_point) const { int value = __half2int_rn(v / scale) + zero_point; return 
static_cast<int8_t>(max(std::numeric_limits<int8_t>::min(), min(std::numeric_limits<int8_t>::max(), value))); } }; template <> struct RoundStd<half, uint8_t> { __device__ __forceinline__ int8_t operator()(half v, half scale, uint8_t zero_point) const { int value = __half2int_rn(v / scale) + zero_point; return static_cast<uint8_t>(max(std::numeric_limits<uint8_t>::min(), min(std::numeric_limits<uint8_t>::max(), value))); } }; template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelStd(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, RoundStd<InT, OutT> round) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; InT scale = *scale_ptr; OutT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : static_cast<OutT>(0); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = round(input[id], scale, zero_point); id += NumThreadsPerBlock; } } } template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelAxisStd(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales, RoundStd<InT, OutT> round) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = round(input[id], scale_ptr[scale_id], zero_point_ptr == nullptr ? 
static_cast<OutT>(0) : zero_point_ptr[scale_id]); id += NumThreadsPerBlock; } } } #if !defined(DISABLE_FLOAT8_TYPES) template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelSat(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, RoundSat<InT, OutT> round, bool saturate) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; InT scale = *scale_ptr; OutT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : OutT(0, true); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = round(input[id], scale, zero_point, saturate); id += NumThreadsPerBlock; } } } template <int NumThreadsPerBlock, int NumElementsPerThread, typename OutT, typename InT> __global__ void QuantizeLinearKernelAxisSat(const InT* input, OutT* output, const InT* scale_ptr, const OutT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales, RoundSat<InT, OutT> round, bool saturate) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = round(input[id], scale_ptr[scale_id], zero_point_ptr == nullptr ? 
OutT(0, true) : zero_point_ptr[scale_id], saturate); id += NumThreadsPerBlock; } } } #endif template <class OutT, class InT> Status CudaQuantizeLinearStd(cudaStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); QuantizeLinearKernelStd<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), RoundStd<InT, OutT>()); return Status::OK(); } template <class OutT, class InT> Status CudaQuantizeLinearAxisStd(cudaStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); QuantizeLinearKernelAxisStd<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales, RoundStd<InT, OutT>()); return Status::OK(); } #if !defined(DISABLE_FLOAT8_TYPES) template <class OutT, class InT> Status CudaQuantizeLinearSat(cudaStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, bool saturate) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); QuantizeLinearKernelSat<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), 
RoundSat<InT, OutT>(), saturate); return Status::OK(); } template <class OutT, class InT> Status CudaQuantizeLinearAxisSat(cudaStream_t stream, const InT* input, OutT* output, const InT* scale, const OutT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); QuantizeLinearKernelAxisSat<GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales, RoundSat<InT, OutT>(), saturate); return Status::OK(); } #endif template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelStd(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; OutT scale = *scale_ptr; InT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : static_cast<InT>(0); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = static_cast<OutT>(input[id] - zero_point) * scale; id += NumThreadsPerBlock; } } } template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelAxisStd(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = (zero_point_ptr == nullptr ? 
static_cast<OutT>(input[id]) : static_cast<OutT>(input[id] - zero_point_ptr[scale_id])) * scale_ptr[scale_id]; id += NumThreadsPerBlock; } } } template <typename InT, typename OutT> struct DQFloat8; #if !defined(DISABLE_FLOAT8_TYPES) #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 template <> struct DQFloat8<Float8E4M3FN, half> { __device__ __forceinline__ half operator()(Float8E4M3FN v, half scale) const { return __nv_cvt_fp8_to_halfraw(v.val, __NV_E4M3) * scale; } }; template <> struct DQFloat8<Float8E5M2, half> { __device__ __forceinline__ half operator()(Float8E5M2 v, half scale) const { return __nv_cvt_fp8_to_halfraw(v.val, __NV_E5M2) * scale; } }; template <> struct DQFloat8<Float8E4M3FN, float> { __device__ __forceinline__ float operator()(Float8E4M3FN v, float scale) const { return __half2float(__nv_cvt_fp8_to_halfraw(v.val, __NV_E4M3)) * scale; } }; template <> struct DQFloat8<Float8E5M2, float> { __device__ __forceinline__ float operator()(Float8E5M2 v, float scale) const { return __half2float(__nv_cvt_fp8_to_halfraw(v.val, __NV_E5M2)) * scale; } }; #else template <> struct DQFloat8<Float8E4M3FN, half> { __device__ __forceinline__ half operator()(Float8E4M3FN v, half scale) const { return __float2half(v.ToFloat()) * scale; } }; template <> struct DQFloat8<Float8E5M2, half> { __device__ __forceinline__ half operator()(Float8E5M2 v, half scale) const { return __float2half(v.ToFloat()) * scale; } }; template <> struct DQFloat8<Float8E4M3FN, float> { __device__ __forceinline__ float operator()(Float8E4M3FN v, float scale) const { return v.ToFloat() * scale; } }; template <> struct DQFloat8<Float8E5M2, float> { __device__ __forceinline__ float operator()(Float8E5M2 v, float scale) const { return v.ToFloat() * scale; } }; #endif #endif template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelSat(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N) { 
CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; OutT scale = *scale_ptr; // zero_point is unused. // InT zero_point = zero_point_ptr != nullptr ? *zero_point_ptr : InT(0, true); #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output[id] = DQFloat8<InT, OutT>()(input[id], scale); id += NumThreadsPerBlock; } } } #if !defined(DISABLE_FLOAT8_TYPES) template <class InT, class OutT, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void DequantizeLinearKernelAxisSat(const InT* input, OutT* output, const OutT* scale_ptr, const InT* zero_point_ptr, CUDA_LONG N, size_t batch_size, size_t n_scales) { CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; // The scale needs to change every n_same_scale. CUDA_LONG n_same_scale = N / (batch_size * n_scales); int scale_id; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { scale_id = (id / n_same_scale) % n_scales; output[id] = DQFloat8<InT, OutT>()(input[id], scale_ptr[scale_id]); id += NumThreadsPerBlock; } } } #endif template <class InT, class OutT> Status CudaDequantizeLinearStd(cudaStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); DequantizeLinearKernelStd<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element)); return Status::OK(); } template <class InT, class OutT> Status CudaDequantizeLinearAxisStd(cudaStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = 
static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); DequantizeLinearKernelAxisStd<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales); return Status::OK(); } #if !defined(DISABLE_FLOAT8_TYPES) template <class InT, class OutT> Status CudaDequantizeLinearSat(cudaStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); DequantizeLinearKernelSat<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element)); return Status::OK(); } template <class InT, class OutT> Status CudaDequantizeLinearAxisSat(cudaStream_t stream, const InT* input, OutT* output, const OutT* scale, const InT* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales) { if (num_of_element <= 0) return Status::OK(); int blocksPerGrid = static_cast<int>(CeilDiv(num_of_element, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); DequantizeLinearKernelAxisSat<InT, OutT, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input, output, scale, zero_point, static_cast<int>(num_of_element), batch_size, n_scales); return Status::OK(); } #endif template Status CudaQuantizeLinearStd<int8_t, float>(cudaStream_t stream, const float* input, int8_t* output, const float* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<uint8_t, float>(cudaStream_t stream, const float* input, uint8_t* output, const float* 
scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<int8_t, half>(cudaStream_t stream, const half* input, int8_t* output, const half* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearStd<uint8_t, half>(cudaStream_t stream, const half* input, uint8_t* output, const half* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaQuantizeLinearAxisStd<int8_t, float>(cudaStream_t stream, const float* input, int8_t* output, const float* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaQuantizeLinearAxisStd<uint8_t, float>(cudaStream_t stream, const float* input, uint8_t* output, const float* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaQuantizeLinearAxisStd<int8_t, half>(cudaStream_t stream, const half* input, int8_t* output, const half* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaQuantizeLinearAxisStd<uint8_t, half>(cudaStream_t stream, const half* input, uint8_t* output, const half* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #if !defined(DISABLE_FLOAT8_TYPES) template Status CudaQuantizeLinearSat<Float8E4M3FN, float>(cudaStream_t stream, const float* input, Float8E4M3FN* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearSat<Float8E5M2, float>(cudaStream_t stream, const float* input, Float8E5M2* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearSat<Float8E4M3FN, half>(cudaStream_t stream, const half* input, Float8E4M3FN* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, bool saturate); template Status 
CudaQuantizeLinearSat<Float8E5M2, half>(cudaStream_t stream, const half* input, Float8E5M2* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E4M3FN, float>(cudaStream_t stream, const float* input, Float8E4M3FN* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E5M2, float>(cudaStream_t stream, const float* input, Float8E5M2* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E4M3FN, half>(cudaStream_t stream, const half* input, Float8E4M3FN* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); template Status CudaQuantizeLinearAxisSat<Float8E5M2, half>(cudaStream_t stream, const half* input, Float8E5M2* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales, bool saturate); #endif template Status CudaDequantizeLinearStd<int8_t, float>(cudaStream_t stream, const int8_t* input, float* output, const float* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<uint8_t, float>(cudaStream_t stream, const uint8_t* input, float* output, const float* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<int8_t, half>(cudaStream_t stream, const int8_t* input, half* output, const half* scale, const int8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearStd<uint8_t, half>(cudaStream_t stream, const uint8_t* input, half* output, const half* scale, const uint8_t* zero_point, size_t num_of_element); template Status CudaDequantizeLinearAxisStd<int8_t, float>(cudaStream_t stream, 
const int8_t* input, float* output, const float* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<uint8_t, float>(cudaStream_t stream, const uint8_t* input, float* output, const float* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<int8_t, half>(cudaStream_t stream, const int8_t* input, half* output, const half* scale, const int8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisStd<uint8_t, half>(cudaStream_t stream, const uint8_t* input, half* output, const half* scale, const uint8_t* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #if !defined(DISABLE_FLOAT8_TYPES) template Status CudaDequantizeLinearSat<Float8E4M3FN, float>(cudaStream_t stream, const Float8E4M3FN* input, float* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E5M2, float>(cudaStream_t stream, const Float8E5M2* input, float* output, const float* scale, const Float8E5M2* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E4M3FN, half>(cudaStream_t stream, const Float8E4M3FN* input, half* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element); template Status CudaDequantizeLinearSat<Float8E5M2, half>(cudaStream_t stream, const Float8E5M2* input, half* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element); template Status CudaDequantizeLinearAxisSat<Float8E4M3FN, float>(cudaStream_t stream, const Float8E4M3FN* input, float* output, const float* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E5M2, float>(cudaStream_t stream, const Float8E5M2* input, float* output, const 
float* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E4M3FN, half>(cudaStream_t stream, const Float8E4M3FN* input, half* output, const half* scale, const Float8E4M3FN* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); template Status CudaDequantizeLinearAxisSat<Float8E5M2, half>(cudaStream_t stream, const Float8E5M2* input, half* output, const half* scale, const Float8E5M2* zero_point, size_t num_of_element, size_t batch_size, size_t n_scales); #endif } // namespace cuda } // namespace onnxruntime
1fc0df7351fbff04468f68e1d0bf15c240341388.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "common.h" #define SIZE_X 32 #define SIZE_Y 32 #define DELTA_X 0.01 #define DELTA_y 0.01 __global__ void ensJulia(int **d_img) { unsigned int idxTx = threadIdx.x; unsigned int idxTy = threadIdx.y; if ( (idxTx < 2*SIZE_X) && (idxTy < 2*SIZE_Y) ) { d_img[idxTx][idxTy] = idxTx + idxTy * 2 *SIZE_Y; } } int main(void) { int i,j; int h_img[2*SIZE_X][2*SIZE_Y]; int **d_img = NULL; CUDA_CHECK_RETURN( hipMalloc((void**) &d_img, sizeof(int) * 4 * SIZE_X * SIZE_Y)); // CUDA_CHECK_RETURN( hipMemcpy(d, idata, // sizeof(int) * 4 * SIZEX * SIZE_Y, hipMemcpyHostToDevice)); dim3 threadSize(SIZE_X*2, SIZE_Y*2); hipLaunchKernelGGL(( ensJulia), dim3(threadSize), dim3(1), 0, 0, d_img); CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipMemcpy(h_img, d_img, sizeof(int) * 4 * SIZE_X * SIZE_Y, hipMemcpyDeviceToHost)); // Affichage for (i=0; i<2*SIZE_X; i++) { for (j=0; j<2*SIZE_Y; j++) { printf("%3d ", h_img[i][j]); } printf("\n"); } CUDA_CHECK_RETURN(hipFree((void*) d_img)); CUDA_CHECK_RETURN(hipDeviceReset()); return 0; }
1fc0df7351fbff04468f68e1d0bf15c240341388.cu
#include <stdio.h> #include <stdlib.h> #include "common.h" #define SIZE_X 32 #define SIZE_Y 32 #define DELTA_X 0.01 #define DELTA_y 0.01 __global__ void ensJulia(int **d_img) { unsigned int idxTx = threadIdx.x; unsigned int idxTy = threadIdx.y; if ( (idxTx < 2*SIZE_X) && (idxTy < 2*SIZE_Y) ) { d_img[idxTx][idxTy] = idxTx + idxTy * 2 *SIZE_Y; } } int main(void) { int i,j; int h_img[2*SIZE_X][2*SIZE_Y]; int **d_img = NULL; CUDA_CHECK_RETURN( cudaMalloc((void**) &d_img, sizeof(int) * 4 * SIZE_X * SIZE_Y)); // CUDA_CHECK_RETURN( cudaMemcpy(d, idata, // sizeof(int) * 4 * SIZEX * SIZE_Y, cudaMemcpyHostToDevice)); dim3 threadSize(SIZE_X*2, SIZE_Y*2); ensJulia<<<threadSize, 1>>>(d_img); CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaMemcpy(h_img, d_img, sizeof(int) * 4 * SIZE_X * SIZE_Y, cudaMemcpyDeviceToHost)); // Affichage for (i=0; i<2*SIZE_X; i++) { for (j=0; j<2*SIZE_Y; j++) { printf("%3d ", h_img[i][j]); } printf("\n"); } CUDA_CHECK_RETURN(cudaFree((void*) d_img)); CUDA_CHECK_RETURN(cudaDeviceReset()); return 0; }
16207a00bb31e1ccda31ff6b7c52a41caa1f2ae1.hip
// !!! This is a file automatically generated by hipify!!! /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> __global__ void ADD(float * A, float*O,int N) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * N; if (i < N * 1024 * 1024){ O[i] = A[i]+threadIdx.x; } } int main(void) { int N = 1; while(N <= 40){ size_t size = N * 1024 * 1024 * sizeof(float); float* h_A = (float*)malloc(size); float* h_O = (float*)malloc(size); float* d_A; hipMalloc((void**)&d_A, size); float* d_O; hipMalloc((void**)&d_O, size); hipEvent_t stop,stop1,stop2; hipEvent_t start,start1,start2; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start1); hipEventCreate(&stop1); hipEventCreate(&start2); hipEventCreate(&stop2); for( int i = 0; i< N * 1024 * 1024; i++){ h_A[i] = rand()/(float)RAND_MAX; } hipEventRecord(start); hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); float elapsedTime = 0; hipEventElapsedTime(&elapsedTime, start, stop); int threadsPerBlock = 256; int blocksPerGrid = (N * 1024 * 1024 + threadsPerBlock - 1) / threadsPerBlock; hipEventRecord(start1); hipLaunchKernelGGL(( ADD), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_A,d_O,N); hipEventRecord(stop1); hipDeviceSynchronize (); hipEventSynchronize(stop1); float elapsedTime1 = 0; hipEventElapsedTime(&elapsedTime1, start1, stop1); hipEventRecord(start2); hipMemcpy(h_O, d_O, size, hipMemcpyDeviceToHost); hipEventRecord(stop2); hipEventSynchronize(stop2); float elapsedTime2 = 0; hipEventElapsedTime(&elapsedTime2, start2, stop2); hipFree(d_A); 
hipFree(d_O); free(h_A); free(h_O); hipEventDestroy(start); hipEventDestroy(stop); hipEventDestroy(start1); hipEventDestroy(stop1); hipEventDestroy(start2); hipEventDestroy(stop2); if(N == 1){ printf("%s\n%s\n%s\n","FIRSTNAME: XINYUN","LASTNAME: LV","E-MAIL: xinyunlv0425@gmail.com"); printf("%-28s%-15s%-15s%-15s\n","N"," CPUtoGPU(ms)"," Kernel(ms)"," GPUtoCPU(ms)"); } printf("%-30d%-15f%-15f%-15f\n",N,elapsedTime,elapsedTime1,elapsedTime2); N++; } }
16207a00bb31e1ccda31ff6b7c52a41caa1f2ae1.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <math.h> __global__ void ADD(float * A, float*O,int N) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * N; if (i < N * 1024 * 1024){ O[i] = A[i]+threadIdx.x; } } int main(void) { int N = 1; while(N <= 40){ size_t size = N * 1024 * 1024 * sizeof(float); float* h_A = (float*)malloc(size); float* h_O = (float*)malloc(size); float* d_A; cudaMalloc((void**)&d_A, size); float* d_O; cudaMalloc((void**)&d_O, size); cudaEvent_t stop,stop1,stop2; cudaEvent_t start,start1,start2; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventCreate(&start2); cudaEventCreate(&stop2); for( int i = 0; i< N * 1024 * 1024; i++){ h_A[i] = rand()/(float)RAND_MAX; } cudaEventRecord(start); cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsedTime = 0; cudaEventElapsedTime(&elapsedTime, start, stop); int threadsPerBlock = 256; int blocksPerGrid = (N * 1024 * 1024 + threadsPerBlock - 1) / threadsPerBlock; cudaEventRecord(start1); ADD<<<blocksPerGrid,threadsPerBlock>>>(d_A,d_O,N); cudaEventRecord(stop1); cudaDeviceSynchronize (); cudaEventSynchronize(stop1); float elapsedTime1 = 0; cudaEventElapsedTime(&elapsedTime1, start1, stop1); cudaEventRecord(start2); cudaMemcpy(h_O, d_O, size, cudaMemcpyDeviceToHost); cudaEventRecord(stop2); cudaEventSynchronize(stop2); float elapsedTime2 = 0; cudaEventElapsedTime(&elapsedTime2, start2, stop2); cudaFree(d_A); cudaFree(d_O); free(h_A); free(h_O); cudaEventDestroy(start); 
cudaEventDestroy(stop); cudaEventDestroy(start1); cudaEventDestroy(stop1); cudaEventDestroy(start2); cudaEventDestroy(stop2); if(N == 1){ printf("%s\n%s\n%s\n","FIRSTNAME: XINYUN","LASTNAME: LV","E-MAIL: xinyunlv0425@gmail.com"); printf("%-28s%-15s%-15s%-15s\n","N"," CPUtoGPU(ms)"," Kernel(ms)"," GPUtoCPU(ms)"); } printf("%-30d%-15f%-15f%-15f\n",N,elapsedTime,elapsedTime1,elapsedTime2); N++; } }
1e285033850950417dacb41d99ad862aebdd6186.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <device_launch_parameters.h> #include <typedef.h> /* * +----------------+ * | | * |A1 |A2 |A3 |B1 |B2 => |A2 |B2 |A3 * ############### ########## ################ */ __global__ void contractTensorPermKernel(type *A, type *B, type* C, int sizeA2, int sizeA3, int sizeB2, int contract) { const int idx = threadIdx.x; const int idy = threadIdx.y*16; const int inx = threadIdx.x + blockIdx.x * blockDim.x; const int iny = (threadIdx.y + blockIdx.y * blockDim.y) * 16; const int inz = threadIdx.z + blockIdx.z * blockDim.z; type sum[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; A += inx + inz * contract * sizeA2; B += iny * contract; C += inx + iny*sizeA2 + inz * sizeA2 * sizeB2; __shared__ type As[32][32]; for (int i = 0; i < contract; i += 32){ #pragma unroll for (int k = 0; k < 16; k ++){ As[idx][idy + k] = A[(k + idy)*contract]; } __syncthreads(); for (int k = 0; k < 32; k++){ type a = As[k][idx]; #pragma unroll for (int j = 0; j < 16; j++){ sum[j] += a * B[contract * j]; } B++; } A += 32; __syncthreads(); } #pragma unroll for (int j = 0; j < 16; j++) C[j * sizeA2] = sum[j]; } extern "C" void contractTensorPerm(type *A, type *B, type* C, int sizeA1, int sizeA2, int sizeA3, int sizeB2){ dim3 threads(32, 2, 1); dim3 grid(sizeA2 / threads.x, sizeB2 / threads.y/16, sizeA3 / threads.z); hipLaunchKernelGGL(( contractTensorPermKernel), dim3(grid), dim3(threads) , 0, 0, A, B, C, sizeA2, sizeA3, sizeB2, sizeA1); }
1e285033850950417dacb41d99ad862aebdd6186.cu
// System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <device_launch_parameters.h> #include <typedef.h> /* * +----------------+ * | | * |A1 |A2 |A3 |B1 |B2 => |A2 |B2 |A3 * ############### ########## ################ */ __global__ void contractTensorPermKernel(type *A, type *B, type* C, int sizeA2, int sizeA3, int sizeB2, int contract) { const int idx = threadIdx.x; const int idy = threadIdx.y*16; const int inx = threadIdx.x + blockIdx.x * blockDim.x; const int iny = (threadIdx.y + blockIdx.y * blockDim.y) * 16; const int inz = threadIdx.z + blockIdx.z * blockDim.z; type sum[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; A += inx + inz * contract * sizeA2; B += iny * contract; C += inx + iny*sizeA2 + inz * sizeA2 * sizeB2; __shared__ type As[32][32]; for (int i = 0; i < contract; i += 32){ #pragma unroll for (int k = 0; k < 16; k ++){ As[idx][idy + k] = A[(k + idy)*contract]; } __syncthreads(); for (int k = 0; k < 32; k++){ type a = As[k][idx]; #pragma unroll for (int j = 0; j < 16; j++){ sum[j] += a * B[contract * j]; } B++; } A += 32; __syncthreads(); } #pragma unroll for (int j = 0; j < 16; j++) C[j * sizeA2] = sum[j]; } extern "C" void contractTensorPerm(type *A, type *B, type* C, int sizeA1, int sizeA2, int sizeA3, int sizeB2){ dim3 threads(32, 2, 1); dim3 grid(sizeA2 / threads.x, sizeB2 / threads.y/16, sizeA3 / threads.z); contractTensorPermKernel<<<grid, threads >>>(A, B, C, sizeA2, sizeA3, sizeB2, sizeA1); }
8bbad656971d883fc17618bb9e234fe5d9aad960.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // You can use any other block size you wish. 
#define BLOCK_SIZE 512 #define BLOCK_DUB 1024 //Works for power of 2 elements #define DEFAULT_NUM_ELEMENTS 1024 #define MAX_RAND 2 typedef float REAL; __global__ void prescan(REAL *odata, REAL *idata, int num) { volatile __shared__ REAL temp[BLOCK_DUB]; int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = 1; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; temp[2*ti+1] = idata[top]; } else { temp[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { temp[2*ti] = idata[2*index*mult+mult-1]; } else { temp[2*ti] = 0; } } for (int i = BLOCK_SIZE; i>0; i>>=1) { __syncthreads(); if (ti<i) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; temp[bi] += temp[ai]; } ofs <<= 1; } __syncthreads(); if (top < DEFAULT_NUM_ELEMENTS) { idata[2*index*mult] = temp[2*ti]; idata[top] = temp[2*ti+1]; } } __global__ void downsweep(REAL *odata, REAL *idata, int num) { volatile __shared__ REAL tempd[BLOCK_DUB]; int ti = threadIdx.x; int bid = blockIdx.x + blockIdx.y*gridDim.x; int index = bid*blockDim.x + ti; int ofs = BLOCK_DUB; int mult = DEFAULT_NUM_ELEMENTS/num; int top = mult*(2*(index+1))-1; if (top < DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; tempd[2*ti+1] = idata[top]; } else { tempd[2*ti+1] = 0; if (top == DEFAULT_NUM_ELEMENTS) { tempd[2*ti] = idata[2*index*mult+mult-1]; } else { tempd[2*ti] = 0; } } if (index == 511) { tempd[num-1] = 0; } for (int j = 1; j<num; j<<=1) { ofs >>= 1; __syncthreads(); if (ti < j) { int ai = ofs*(2*ti+1)-1; int bi = ofs*(2*ti+2)-1; REAL temp2 = tempd[ai]; tempd[ai] = tempd[bi]; tempd[bi] += temp2; } } __syncthreads(); odata[ti] = tempd[ti]; odata[ti+blockDim.x] = tempd[ti+blockDim.x]; } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. 
// Host-side driver for the scan: repeatedly reduces the input with prescan()
// until at most one level of partial sums remains, then runs a single
// downsweep() block to convert the reduction tree in outArray/inArray into an
// exclusive prefix sum in outArray.
//
// Fix: the original re-declared `dim3 dimGrid(...)` inside the while loop,
// shadowing the outer dimGrid; the recomputed grid size was never used and
// every launch reused the first grid. The loop now assigns to the outer
// variable instead.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
  //Use kernel to compute the reduction
  int blocksx, blocksy, blocks;
  int threads = BLOCK_SIZE;
  int nestElements = numElements;

  // One block processes 2*BLOCK_SIZE (= BLOCK_DUB) elements.
  blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
  blocks = blocksx;
  blocksy = 1;
  // Fold an oversized 1D grid into 2D (65535 is the per-dimension limit).
  if (blocksx > 65535) {
    blocksy = (blocksx+65534)/65535;
    blocksx = 65535;
  }
  dim3 dimGrid(blocksx,blocksy);

  while(nestElements > 1) {
    // Reduce the current level in place.
    hipLaunchKernelGGL(( prescan) , dim3(dimGrid),dim3(threads), 0, 0, outArray, inArray, nestElements);
    // The next level scans one partial sum per block of the previous level.
    nestElements = blocks;
    blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
    blocks = blocksx;
    blocksy = 1;
    if (blocksx > 65535) {
      blocksy = (blocksx+65534)/65535;
      blocksx = 65535;
    }
    // was: `dim3 dimGrid(blocksx, blocksy);` -- a shadowing no-op
    dimGrid = dim3(blocksx, blocksy);
  }

  // Single-block down-sweep produces the final exclusive scan.
  hipLaunchKernelGGL(( downsweep) , dim3(1),dim3(BLOCK_SIZE), 0, 0, outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);

extern "C" unsigned int compare( const REAL* reference, const REAL* data, const unsigned int len);
extern "C" void computeGold( REAL* reference, REAL* idata, const unsigned int len);

unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
  runTest( argc, argv);
  return EXIT_SUCCESS;
}

////////////////////////////////////////////////////////////////////////////////
//!
Run a scan test for CUDA (HIP port)
////////////////////////////////////////////////////////////////////////////////
// Allocates and initializes the input, times a CPU reference scan and the GPU
// scan, compares the two results with a relative tolerance, and prints
// PASSED/FAILED.
//
// Fixes vs. original:
//  * the initial malloc(0) for h_data was leaked because both switch branches
//    immediately re-assigned h_data; allocation now happens once, after
//    num_elements is known.
//  * num_elements <= 0 (e.g. atoi failure) is guarded; the original would
//    launch downsweep with num == 0, dividing by zero in the kernel.
//  * host allocation failures are checked instead of dereferencing NULL.
void runTest( int argc, char** argv)
{
  float device_time;
  float host_time;
  int num_elements = 0;  // Must support large, non-power-of-2 arrays

  // Determine the element count from the command line (default when absent).
  switch(argc-1)
  {
    case 0:
      num_elements = DEFAULT_NUM_ELEMENTS;
      break;
    default:
      num_elements = atoi(argv[1]);
      break;
  }
  if (num_elements <= 0) {
    // atoi() returns 0 on parse failure; fall back to the default size.
    num_elements = DEFAULT_NUM_ELEMENTS;
  }

  // allocate host memory to store the input data
  unsigned int mem_size = sizeof( REAL) * num_elements;
  REAL* h_data = (REAL*) malloc( mem_size);
  if (h_data == NULL) {
    fprintf(stderr, "host allocation failed\n");
    return;
  }
  // initialize the input data on the host
  for( int i = 0; i < num_elements; ++i) {
    // h_data[i] = 1.0f;
    h_data[i] = (int)(rand() % MAX_RAND);
  }

  hipEvent_t time_start;
  hipEvent_t time_end;
  hipEventCreate(&time_start);
  hipEventCreate(&time_end);

  // compute reference solution on the CPU, timed with the same events
  REAL* reference = (REAL*) malloc( mem_size);
  if (reference == NULL) {
    fprintf(stderr, "host allocation failed\n");
    free( h_data);
    return;
  }
  hipEventRecord(time_start, 0);
  computeGold( reference, h_data, num_elements);
  hipEventRecord(time_end, 0);
  hipEventSynchronize(time_end);
  hipEventElapsedTime(&host_time, time_start, time_end);

  printf("\n\n**===-------------------------------------------------===**\n");
  printf("Processing %d elements...\n", num_elements);
  printf("Host CPU Processing time: %f (ms)\n", host_time);

  // allocate device memory input and output arrays
  REAL* d_idata = NULL;
  REAL* d_odata = NULL;
  hipMalloc( (void**) &d_idata, mem_size);
  hipMalloc( (void**) &d_odata, mem_size);

  // copy host memory to device input array
  hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice);
  // initialize all the other device arrays to be safe
  hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice);

  // time the GPU scan
  hipEventRecord(time_start, 0);
  prescanArray(d_odata, d_idata, num_elements);
  hipDeviceSynchronize();
  hipEventRecord(time_end, 0);
  hipEventSynchronize(time_end);
  hipEventElapsedTime(&device_time, time_start, time_end);
  hipEventDestroy(time_start);
  hipEventDestroy(time_end);

  printf("CUDA Processing time: %g (ms)\n", device_time);

  // copy result from device to host
  hipMemcpy( h_data, d_odata, sizeof(REAL) * num_elements, hipMemcpyDeviceToHost);

  // Check if the result is equivalent to the expected solution
  unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
  printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");

  // cleanup memory
  free( h_data);
  free( reference);
  hipFree( d_odata);
  hipFree( d_idata);
}

// Compares two arrays element-wise with relative error tolerance `err`
// (denominator is max(1, |reference[i]|)). Returns 1 when all elements match,
// 0 otherwise (and prints the mismatch count).
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err)
{
  int i;
  int diff_count = 0;
  for (i = 0; i < num_elements; i++) {
    REAL diff = fabs(reference[i] - h_data[i]);
    REAL denominator = 1.f;
    if (denominator < fabs(reference[i])) {
      denominator = fabs(reference[i]);
    }
    // NaN-safe form: a NaN diff fails the `<` test and is counted.
    if (!(diff / denominator < err)) {
      diff_count ++;
    }
  }
  if (diff_count > 0) {
    printf("Number of difference: %d\n", diff_count);
    return 0;
  } else {
    return 1;
  }
}
8bbad656971d883fc17618bb9e234fe5d9aad960.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // You can use any other block size you wish. 
// Tunable launch constants.
// NOTE(review): both kernels below bake in the assumption that the total input
// size is exactly DEFAULT_NUM_ELEMENTS (1024); confirm before reusing them
// with other sizes.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
//Works for power of 2 elements
#define DEFAULT_NUM_ELEMENTS 1024
#define MAX_RAND 2

// Element type used throughout the scan.
typedef float REAL;

// Up-sweep (reduce) phase of a Blelloch-style work-efficient scan.
// Each thread loads two strided elements into shared memory; the block then
// builds partial sums in a binary tree. Results are written back into idata,
// i.e. the input array is modified in place.
// Expected launch: blocks of BLOCK_SIZE threads, possibly folded into a 2D
// grid (see the bid computation below).
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
  // Block-local scratch for 2*BLOCK_SIZE elements (shared memory,
  // uninitialized until the loads below).
  volatile __shared__ REAL temp[BLOCK_DUB];
  int ti = threadIdx.x;
  // Flatten a possibly-2D grid back into a linear block id.
  int bid = blockIdx.x + blockIdx.y*gridDim.x;
  int index = bid*blockDim.x + ti;
  int ofs = 1;
  // Stride between the elements this pass touches; assumes num divides
  // DEFAULT_NUM_ELEMENTS exactly -- TODO confirm for non-power-of-2 num.
  int mult = DEFAULT_NUM_ELEMENTS/num;
  int top = mult*(2*(index+1))-1;
  // Load two strided elements per thread, zero-padding past the end so the
  // tree reduction always sees a full BLOCK_DUB values.
  if (top < DEFAULT_NUM_ELEMENTS) {
    temp[2*ti] = idata[2*index*mult+mult-1];
    temp[2*ti+1] = idata[top];
  } else {
    temp[2*ti+1] = 0;
    if (top == DEFAULT_NUM_ELEMENTS) {
      temp[2*ti] = idata[2*index*mult+mult-1];
    } else {
      temp[2*ti] = 0;
    }
  }
  // Tree reduction: each level halves the number of active threads.
  // __syncthreads() sits at the top of the loop body, outside the divergent
  // `if`, so every thread of the block reaches the barrier.
  for (int i = BLOCK_SIZE; i>0; i>>=1) {
    __syncthreads();
    if (ti<i) {
      int ai = ofs*(2*ti+1)-1;
      int bi = ofs*(2*ti+2)-1;
      temp[bi] += temp[ai];
    }
    ofs <<= 1;
  }
  __syncthreads();
  // Write partial sums back into the *input* array (in place).
  // NOTE(review): the write index (2*index*mult) differs from the read index
  // (2*index*mult+mult-1) whenever mult > 1 -- verify this is intended.
  if (top < DEFAULT_NUM_ELEMENTS) {
    idata[2*index*mult] = temp[2*ti];
    idata[top] = temp[2*ti+1];
  }
}

// Down-sweep phase of the Blelloch scan: turns the reduction tree produced by
// prescan() into an exclusive prefix sum and writes it to odata.
// Expected launch in this file: a single block of BLOCK_SIZE threads (see
// prescanArray), so it can emit at most BLOCK_DUB results.
__global__ void downsweep(REAL *odata, REAL *idata, int num)
{
  volatile __shared__ REAL tempd[BLOCK_DUB];
  int ti = threadIdx.x;
  int bid = blockIdx.x + blockIdx.y*gridDim.x;
  int index = bid*blockDim.x + ti;
  int ofs = BLOCK_DUB;
  int mult = DEFAULT_NUM_ELEMENTS/num;
  int top = mult*(2*(index+1))-1;
  // Same strided, zero-padded load as prescan().
  if (top < DEFAULT_NUM_ELEMENTS) {
    tempd[2*ti] = idata[2*index*mult+mult-1];
    tempd[2*ti+1] = idata[top];
  } else {
    tempd[2*ti+1] = 0;
    if (top == DEFAULT_NUM_ELEMENTS) {
      tempd[2*ti] = idata[2*index*mult+mult-1];
    } else {
      tempd[2*ti] = 0;
    }
  }
  // Seed the exclusive scan by clearing the root of the tree.
  // NOTE(review): 511 is hard-coded (BLOCK_SIZE-1); this selects the intended
  // thread only when exactly one block of 512 threads is launched.
  if (index == 511) {
    tempd[num-1] = 0;
  }
  // Walk the tree back down, swapping and accumulating partial sums.
  for (int j = 1; j<num; j<<=1) {
    ofs >>= 1;
    __syncthreads();
    if (ti < j) {
      int ai = ofs*(2*ti+1)-1;
      int bi = ofs*(2*ti+2)-1;
      REAL temp2 = tempd[ai];
      tempd[ai] = tempd[bi];
      tempd[bi] += temp2;
    }
  }
  __syncthreads();
  // Each thread writes two results (contiguous, coalesced stores).
  odata[ti] = tempd[ti];
  odata[ti+blockDim.x] = tempd[ti+blockDim.x];
}

// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
// Host-side driver for the scan: repeatedly reduces the input with prescan()
// until at most one level of partial sums remains, then runs a single
// downsweep() block to convert the reduction tree in outArray/inArray into an
// exclusive prefix sum in outArray.
//
// Fix: the original re-declared `dim3 dimGrid(...)` inside the while loop,
// shadowing the outer dimGrid; the recomputed grid size was never used and
// every launch reused the first grid. The loop now assigns to the outer
// variable instead.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
  //Use kernel to compute the reduction
  int blocksx, blocksy, blocks;
  int threads = BLOCK_SIZE;
  int nestElements = numElements;

  // One block processes 2*BLOCK_SIZE (= BLOCK_DUB) elements.
  blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
  blocks = blocksx;
  blocksy = 1;
  // Fold an oversized 1D grid into 2D (65535 is the per-dimension limit).
  if (blocksx > 65535) {
    blocksy = (blocksx+65534)/65535;
    blocksx = 65535;
  }
  dim3 dimGrid(blocksx,blocksy);

  while(nestElements > 1) {
    // Reduce the current level in place.
    prescan <<<dimGrid,threads>>> (outArray, inArray, nestElements);
    // The next level scans one partial sum per block of the previous level.
    nestElements = blocks;
    blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
    blocks = blocksx;
    blocksy = 1;
    if (blocksx > 65535) {
      blocksy = (blocksx+65534)/65535;
      blocksx = 65535;
    }
    // was: `dim3 dimGrid(blocksx, blocksy);` -- a shadowing no-op
    dimGrid = dim3(blocksx, blocksy);
  }

  // Single-block down-sweep produces the final exclusive scan.
  downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);

extern "C" unsigned int compare( const REAL* reference, const REAL* data, const unsigned int len);
extern "C" void computeGold( REAL* reference, REAL* idata, const unsigned int len);

unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
  runTest( argc, argv);
  return EXIT_SUCCESS;
}

////////////////////////////////////////////////////////////////////////////////
//!
Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Allocates and initializes the input, times a CPU reference scan and the GPU
// scan, compares the two results with a relative tolerance, and prints
// PASSED/FAILED.
//
// Fixes vs. original:
//  * the initial malloc(0) for h_data was leaked because both switch branches
//    immediately re-assigned h_data; allocation now happens once, after
//    num_elements is known.
//  * num_elements <= 0 (e.g. atoi failure) is guarded; the original would
//    launch downsweep with num == 0, dividing by zero in the kernel.
//  * host allocation failures are checked instead of dereferencing NULL.
//  * deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
void runTest( int argc, char** argv)
{
  float device_time;
  float host_time;
  int num_elements = 0;  // Must support large, non-power-of-2 arrays

  // Determine the element count from the command line (default when absent).
  switch(argc-1)
  {
    case 0:
      num_elements = DEFAULT_NUM_ELEMENTS;
      break;
    default:
      num_elements = atoi(argv[1]);
      break;
  }
  if (num_elements <= 0) {
    // atoi() returns 0 on parse failure; fall back to the default size.
    num_elements = DEFAULT_NUM_ELEMENTS;
  }

  // allocate host memory to store the input data
  unsigned int mem_size = sizeof( REAL) * num_elements;
  REAL* h_data = (REAL*) malloc( mem_size);
  if (h_data == NULL) {
    fprintf(stderr, "host allocation failed\n");
    return;
  }
  // initialize the input data on the host
  for( int i = 0; i < num_elements; ++i) {
    // h_data[i] = 1.0f;
    h_data[i] = (int)(rand() % MAX_RAND);
  }

  cudaEvent_t time_start;
  cudaEvent_t time_end;
  cudaEventCreate(&time_start);
  cudaEventCreate(&time_end);

  // compute reference solution on the CPU, timed with the same events
  REAL* reference = (REAL*) malloc( mem_size);
  if (reference == NULL) {
    fprintf(stderr, "host allocation failed\n");
    free( h_data);
    return;
  }
  cudaEventRecord(time_start, 0);
  computeGold( reference, h_data, num_elements);
  cudaEventRecord(time_end, 0);
  cudaEventSynchronize(time_end);
  cudaEventElapsedTime(&host_time, time_start, time_end);

  printf("\n\n**===-------------------------------------------------===**\n");
  printf("Processing %d elements...\n", num_elements);
  printf("Host CPU Processing time: %f (ms)\n", host_time);

  // allocate device memory input and output arrays
  REAL* d_idata = NULL;
  REAL* d_odata = NULL;
  cudaMalloc( (void**) &d_idata, mem_size);
  cudaMalloc( (void**) &d_odata, mem_size);

  // copy host memory to device input array
  cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
  // initialize all the other device arrays to be safe
  cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice);

  // time the GPU scan
  cudaEventRecord(time_start, 0);
  prescanArray(d_odata, d_idata, num_elements);
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent.
  cudaDeviceSynchronize();
  cudaEventRecord(time_end, 0);
  cudaEventSynchronize(time_end);
  cudaEventElapsedTime(&device_time, time_start, time_end);
  cudaEventDestroy(time_start);
  cudaEventDestroy(time_end);

  printf("CUDA Processing time: %g (ms)\n", device_time);

  // copy result from device to host
  cudaMemcpy( h_data, d_odata, sizeof(REAL) * num_elements, cudaMemcpyDeviceToHost);

  // Check if the result is equivalent to the expected solution
  unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
  printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");

  // cleanup memory
  free( h_data);
  free( reference);
  cudaFree( d_odata);
  cudaFree( d_idata);
}

// Compares two arrays element-wise with relative error tolerance `err`
// (denominator is max(1, |reference[i]|)). Returns 1 when all elements match,
// 0 otherwise (and prints the mismatch count).
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err)
{
  int i;
  int diff_count = 0;
  for (i = 0; i < num_elements; i++) {
    REAL diff = fabs(reference[i] - h_data[i]);
    REAL denominator = 1.f;
    if (denominator < fabs(reference[i])) {
      denominator = fabs(reference[i]);
    }
    // NaN-safe form: a NaN diff fails the `<` test and is counted.
    if (!(diff / denominator < err)) {
      diff_count ++;
    }
  }
  if (diff_count > 0) {
    printf("Number of difference: %d\n", diff_count);
    return 0;
  } else {
    return 1;
  }
}
d00eeebea8dc14a66f4b99d9644d6901a8d2433c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/strings/detail/converters.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/wrappers/timestamps.hpp> #include <strings/utilities.cuh> #include <rmm/thrust_rmm_allocator.h> #include <map> #include <vector> namespace cudf { namespace strings { namespace detail { namespace { /** * @brief Units for timestamp conversion. * These are defined since there are more than what cudf supports. 
*/
// Internal precision for string<->timestamp conversion. Finer-grained than
// cudf's own timestamp types (years/months have no cudf equivalent here).
enum class timestamp_units {
  years,    ///< precision is years
  months,   ///< precision is months
  days,     ///< precision is days
  hours,    ///< precision is hours
  minutes,  ///< precision is minutes
  seconds,  ///< precision is seconds
  ms,       ///< precision is milliseconds
  us,       ///< precision is microseconds
  ns        ///< precision is nanoseconds
};

// Indexes into the fixed-size int32 "timeparts" array used by the parsing and
// formatting code below; TP_ARRAYSIZE is the array length, not a component.
enum timestamp_parse_component {
  TP_YEAR        = 0,
  TP_MONTH       = 1,
  TP_DAY         = 2,
  TP_DAY_OF_YEAR = 3,
  TP_HOUR        = 4,
  TP_MINUTE      = 5,
  TP_SECOND      = 6,
  TP_SUBSECOND   = 7,
  TP_TZ_MINUTES  = 8,
  TP_ARRAYSIZE   = 9
};

// Discriminator for format_item: a literal character copied through verbatim,
// or a %-style timestamp format specifier.
enum class format_char_type : int8_t {
  literal,   // literal char type passed through
  specifier  // timestamp format specifier
};

/**
 * @brief Represents a format specifier or literal from a timestamp format string.
 *
 * Created by the format_compiler when parsing a format string.
 * alignas(4) keeps the 3-byte struct at a fixed 4-byte size for the host->device copy.
 */
struct alignas(4) format_item {
  format_char_type item_type;  // specifier or literal indicator
  char value;                  // specifier or literal value
  int8_t length;               // item length in bytes

  // Factory for a %X specifier occupying `length` bytes of the input/output.
  static format_item new_specifier(char format_char, int8_t length)
  {
    return format_item{format_char_type::specifier, format_char, length};
  }
  // Factory for a single passthrough literal character.
  static format_item new_delimiter(char literal)
  {
    return format_item{format_char_type::literal, literal, 1};
  }
};

/**
 * @brief The format_compiler parses a timestamp format string into a vector of
 * format_items.
 *
 * The vector of format_items are used when parsing a string into timestamp
 * components and when formatting a string from timestamp components.
*/
struct format_compiler {
  std::string format;           // the raw strftime-like format string
  std::string template_string;  // expanded template (specifier char repeated per its width)
  timestamp_units units;
  rmm::device_vector<format_item> d_items;  // compiled program, device-resident

  // Default byte widths for each supported specifier; '%f' width may be
  // overridden by an explicit precision digit (e.g. "%3f").
  std::map<char, int8_t> specifier_lengths = {{'Y', 4},
                                              {'y', 2},
                                              {'m', 2},
                                              {'d', 2},
                                              {'H', 2},
                                              {'I', 2},
                                              {'M', 2},
                                              {'S', 2},
                                              {'f', 6},
                                              {'z', 5},
                                              {'Z', 3},
                                              {'p', 2},
                                              {'j', 3}};

  format_compiler(const char* format, timestamp_units units) : format(format), units(units) {}

  // Parses `format` into format_items, copies them to device memory, and
  // returns the device pointer (owned by this object via d_items).
  // Throws via CUDF_EXPECTS on malformed format strings.
  format_item const* compile_to_device()
  {
    std::vector<format_item> items;
    const char* str = format.c_str();
    auto length     = format.length();
    while (length > 0) {
      char ch = *str++;
      length--;
      // Any non-'%' character is a passthrough literal.
      if (ch != '%') {
        items.push_back(format_item::new_delimiter(ch));
        template_string.append(1, ch);
        continue;
      }
      CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");

      ch = *str++;
      length--;
      if (ch == '%')  // escaped % char
      {
        items.push_back(format_item::new_delimiter(ch));
        template_string.append(1, ch);
        continue;
      }

      // Optional single-digit precision prefix; only valid before 'f'.
      if (ch >= '0' && ch <= '9') {
        CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
        specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
        ch = *str++;
        length--;
      }
      CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
                   "invalid format specifier: " + std::string(1, ch));

      int8_t spec_length = specifier_lengths[ch];
      items.push_back(format_item::new_specifier(ch, spec_length));
      template_string.append((size_t)spec_length, ch);
    }
    // create program in device memory
    // NOTE(review): hipMemcpyAsync on the default stream with no explicit
    // synchronization before `items` goes out of scope in the caller's use --
    // presumably ordered by later default-stream work; confirm.
    d_items.resize(items.size());
    CUDA_TRY(hipMemcpyAsync(
      d_items.data().get(), items.data(), items.size() * sizeof(items[0]), hipMemcpyHostToDevice));
    return d_items.data().get();
  }

  // these calls are only valid after compile_to_device is called
  size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
  size_type items_count() const { return static_cast<size_type>(d_items.size()); }
  int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};

// this parses date/time characters into a timestamp integer
template <typename T>  // timestamp type
// Device functor: converts one string element into a timestamp value of type T
// using the compiled format_items. Returns 0 for null/empty/unparseable rows.
struct parse_datetime {
  column_device_view const d_strings;   // input strings column
  format_item const* d_format_items;    // compiled format program (device memory)
  size_type items_count;
  timestamp_units units;
  int8_t subsecond_precision;           // digit count parsed for '%f'

  // Parses up to `bytes` leading decimal digits; stops at the first non-digit.
  // No sign handling -- callers deal with '-'/'+' themselves (see 'z' below).
  __device__ int32_t str2int(const char* str, size_type bytes)
  {
    const char* ptr = str;
    int32_t value   = 0;
    for (size_type idx = 0; idx < bytes; ++idx) {
      char chr = *ptr++;
      if (chr < '0' || chr > '9') break;
      value = (value * 10) + static_cast<int32_t>(chr - '0');
    }
    return value;
  }

  // Walk the format_items to read the datetime string.
  // Returns 0 if all ok (non-zero: 1 = string too short, 3 = unknown specifier).
  __device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
  {
    auto ptr    = d_string.data();
    auto length = d_string.size_bytes();
    for (size_t idx = 0; idx < items_count; ++idx) {
      auto item = d_format_items[idx];
      if (length < item.length) return 1;
      if (item.item_type == format_char_type::literal) {
        // static character we'll just skip;
        // consume item.length bytes from string
        ptr += item.length;
        length -= item.length;
        continue;
      }
      // special logic for each specifier
      switch (item.value) {
        case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
        case 'y': timeparts[TP_YEAR] = str2int(ptr, item.length) + 1900; break;
        case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
        case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
        case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
        case 'H':
        case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
        case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
        case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
        case 'f': timeparts[TP_SUBSECOND] = str2int(ptr, item.length); break;
        case 'p': {
          // AM/PM marker adjusts the already-parsed 12-hour value.
          string_view am_pm(ptr, 2);
          auto hour = timeparts[TP_HOUR];
          if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
            if (hour == 12) hour = 0;
          } else if (hour < 12)
            hour += 12;
          timeparts[TP_HOUR] = hour;
          break;
        }
        case 'z': {
          // Sign is deliberately inverted: the stored minutes are later ADDED
          // to revert the local time back to UTC.
          int sign = *ptr == '-' ? 1 : -1;
          int hh   = str2int(ptr + 1, 2);
          int mm   = str2int(ptr + 3, 2);
          // ignoring the rest for now
          // item.length has how many chars we should read
          timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
          break;
        }
        case 'Z': break;  // skip (timezone name is not interpreted)
        default: return 3;
      }
      ptr += item.length;
      length -= item.length;
    }
    return 0;
  }

  // Combines the parsed components into a single integer in the requested
  // `units` (epoch 1970). Uses an era-based civil-calendar computation
  // (400-year eras -- see the original "date cycle repeats" note below).
  // Note: the `units` parameter intentionally shadows the member of the
  // same name.
  __device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
  {
    auto year = timeparts[TP_YEAR];
    if (units == timestamp_units::years) return year - 1970;
    auto month = timeparts[TP_MONTH];
    if (units == timestamp_units::months)
      return ((year - 1970) * 12) + (month - 1);  // months are 1-12, need to 0-base it here
    auto day = timeparts[TP_DAY];
    // The months are shifted so that March is the starting month and February
    // (possible leap day in it) is the last month for the linear calculation
    year -= (month <= 2) ? 1 : 0;
    // date cycle repeats every 400 years (era)
    constexpr int32_t erasInDays  = 146097;
    constexpr int32_t erasInYears = (erasInDays / 365);
    auto era = (year >= 0 ? year : year - 399) / erasInYears;
    auto yoe = year - era * erasInYears;  // year-of-era [0, 399]
    // day-of-year in the March-based calendar (month==0 falls back to raw day)
    auto doy = month == 0 ? day : ((153 * (month + (month > 2 ? -3 : 9)) + 2) / 5 + day - 1);
    auto doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy;  // day-of-era incl. leap days
    int32_t days =
      (era * erasInDays) + doe - 719468;  // 719468 shifts the epoch to 1970-01-01
    if (units == timestamp_units::days) return days;

    auto tzadjust = timeparts[TP_TZ_MINUTES];  // in minutes (already sign-flipped toward UTC)
    auto hour     = timeparts[TP_HOUR];
    if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);

    auto minute = timeparts[TP_MINUTE];
    if (units == timestamp_units::minutes)
      return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;

    auto second = timeparts[TP_SECOND];
    int64_t timestamp =
      (days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
    if (units == timestamp_units::seconds) return timestamp;

    // Scale the parsed fractional digits up to nanoseconds, then divide down
    // to the target sub-second unit.
    int64_t powers_of_ten[] = {
      1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
    int64_t subsecond =
      timeparts[TP_SUBSECOND] * powers_of_ten[9 - subsecond_precision];  // normalize to nanoseconds
    if (units == timestamp_units::ms) {
      timestamp *= 1000L;
      subsecond = subsecond / 1000000L;
    } else if (units == timestamp_units::us) {
      timestamp *= 1000000L;
      subsecond = subsecond / 1000L;
    } else if (units == timestamp_units::ns)
      timestamp *= 1000000000L;
    timestamp += subsecond;
    return timestamp;
  }

  // Per-row entry point: null or empty or unparseable rows map to 0.
  __device__ T operator()(size_type idx)
  {
    if (d_strings.is_null(idx)) return 0;
    string_view d_str = d_strings.element<string_view>(idx);
    if (d_str.empty()) return 0;
    //
    int32_t timeparts[TP_ARRAYSIZE] = {0, 1, 1};  // month and day are 1-based
    if (parse_into_parts(d_str, timeparts)) return 0;  // unexpected parse case
    //
    return static_cast<T>(timestamp_from_parts(timeparts, units));
  }
};

// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
  // Fallback for non-timestamp types: always an error.
  template <typename T>
  timestamp_units operator()()
  {
    CUDF_FAIL("Invalid type for timestamp conversion.");
  }
};

template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
  return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>() { return timestamp_units::seconds; } template <> timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>() { return timestamp_units::ms; } template <> timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>() { return timestamp_units::us; } template <> timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>() { return timestamp_units::ns; } // dispatch operator to map timestamp to native fixed-width-type struct dispatch_to_timestamps_fn { template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr> void operator()(column_device_view const& d_strings, std::string const& format, timestamp_units units, mutable_column_view& results_view, hipStream_t stream) const { format_compiler compiler(format.c_str(), units); auto d_items = compiler.compile_to_device(); auto d_results = results_view.data<T>(); parse_datetime<T> pfn{ d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()}; thrust::transform(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(results_view.size()), d_results, pfn); } template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr> void operator()(column_device_view const&, std::string const&, timestamp_units, mutable_column_view&, hipStream_t) const { CUDF_FAIL("Only timestamps type are expected"); } }; } // namespace // std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings, data_type timestamp_type, std::string const& format, hipStream_t stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = strings.size(); if (strings_count == 0) return make_timestamp_column(timestamp_type, 0); CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); timestamp_units units = cudf::type_dispatcher(timestamp_type, 
dispatch_timestamp_to_units_fn()); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_column = *strings_column; auto results = make_timestamp_column(timestamp_type, strings_count, copy_bitmask(strings.parent(), stream, mr), strings.null_count(), stream, mr); auto results_view = results->mutable_view(); cudf::type_dispatcher( timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream); results->set_null_count(strings.null_count()); return results; } } // namespace detail // external API std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings, data_type timestamp_type, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::to_timestamps(strings, timestamp_type, format, hipStream_t{}, mr); } namespace detail { namespace { // converts a timestamp into date-time string template <typename T> struct datetime_formatter { const column_device_view d_timestamps; const format_item* d_format_items; size_type items_count; timestamp_units units; const int32_t* d_offsets; char* d_chars; // divide timestamp integer into time components (year, month, day, etc) // TODO call the simt::std::chrono methods here instead when the are ready __device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts) { if (units == timestamp_units::years) { timeparts[TP_YEAR] = static_cast<int32_t>(timestamp) + 1970; timeparts[TP_MONTH] = 1; timeparts[TP_DAY] = 1; return; } if (units == timestamp_units::months) { int32_t month = static_cast<int32_t>(timestamp % 12); int32_t year = static_cast<int32_t>(timestamp / 12) + 1970; timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month + 1; // months start at 1 and not 0 timeparts[TP_DAY] = 1; return; } // first, convert to days so we can handle months, leap years, etc. 
int32_t days = static_cast<int32_t>(timestamp); // default to days if (units == timestamp_units::hours) days = static_cast<int32_t>(timestamp / 24L); else if (units == timestamp_units::minutes) days = static_cast<int32_t>(timestamp / 1440L); // 24*60 else if (units == timestamp_units::seconds) days = static_cast<int32_t>(timestamp / 86400L); // 24*60*60 else if (units == timestamp_units::ms) days = static_cast<int32_t>(timestamp / 86400000L); else if (units == timestamp_units::us) days = static_cast<int32_t>(timestamp / 86400000000L); else if (units == timestamp_units::ns) days = static_cast<int32_t>(timestamp / 86400000000000L); days = days + 719468; // 719468 is days between 0000-00-00 and 1970-01-01 constexpr int32_t daysInEra = 146097; // (400*365)+97 constexpr int32_t daysInCentury = 36524; // (100*365) + 24; constexpr int32_t daysIn4Years = 1461; // (4*365) + 1; constexpr int32_t daysInYear = 365; // The months are shifted so that March is the starting month and February // (with possible leap day in it) is the last month for the linear calculation. 
// Day offsets for each month: Mar Apr May June July Aug Sep Oct Nov Dec Jan Feb const int32_t monthDayOffset[] = {0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337, 366}; // code logic handles leap years in chunks: 400y,100y,4y,1y int32_t year = 400 * (days / daysInEra); days = days % daysInEra; int32_t leapy = days / daysInCentury; days = days % daysInCentury; if (leapy == 4) { // landed exactly on a leap century days += daysInCentury; --leapy; } year += 100 * leapy; year += 4 * (days / daysIn4Years); days = days % daysIn4Years; leapy = days / daysInYear; days = days % daysInYear; if (leapy == 4) { // landed exactly on a leap year days += daysInYear; --leapy; } year += leapy; // int32_t month = 12; for (int32_t idx = 0; idx < month; ++idx) { // find the month if (days < monthDayOffset[idx + 1]) { month = idx; break; } } // compute day of the year and account for calculating with March being the first month // for month >= 10, leap-day has been already been included timeparts[TP_DAY_OF_YEAR] = (month >= 10) ? 
days - monthDayOffset[10] + 1 : days + /*Jan=*/31 + /*Feb=*/28 + 1 + // 2-month shift ((year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0))); int32_t day = days - monthDayOffset[month] + 1; // compute day of month if (month >= 10) ++year; month = ((month + 2) % 12) + 1; // adjust Jan-Mar offset timeparts[TP_YEAR] = year; timeparts[TP_MONTH] = month; timeparts[TP_DAY] = day; if (units == timestamp_units::days) return; // done with date // now work on time int64_t hour = timestamp, minute = timestamp, second = timestamp; if (units == timestamp_units::hours) { timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); return; } hour = hour / 60; if (units == timestamp_units::minutes) { timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60); return; } hour = hour / 60; minute = minute / 60; if (units == timestamp_units::seconds) { timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60); timeparts[TP_SECOND] = static_cast<int32_t>(second % 60); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if (units == timestamp_units::ms) { timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60); timeparts[TP_SECOND] = static_cast<int32_t>(second % 60); timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; if (units == timestamp_units::us) { timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60); timeparts[TP_SECOND] = static_cast<int32_t>(second % 60); timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000000); return; } hour = hour / 1000; minute = minute / 1000; second = second / 1000; timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24); timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60); timeparts[TP_SECOND] = 
static_cast<int32_t>(second % 60); timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000000000); } // utility to create 0-padded integers (up to 9 chars) __device__ char* int2str(char* str, int bytes, int val) { char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'}; char* ptr = tmpl; while (val > 0) { int digit = val % 10; *ptr++ = '0' + digit; val = val / 10; } ptr = tmpl + bytes - 1; while (bytes-- > 0) *str++ = *ptr--; return str; } __device__ char* format_from_parts(int32_t const* timeparts, char* ptr) { for (size_t idx = 0; idx < items_count; ++idx) { auto item = d_format_items[idx]; if (item.item_type == format_char_type::literal) { *ptr++ = item.value; continue; } // special logic for each specifier switch (item.value) { case 'Y': // 4-digit year ptr = int2str(ptr, item.length, timeparts[TP_YEAR]); break; case 'y': // 2-digit year ptr = int2str(ptr, item.length, timeparts[TP_YEAR] - 1900); break; case 'm': // month ptr = int2str(ptr, item.length, timeparts[TP_MONTH]); break; case 'd': // day of month ptr = int2str(ptr, item.length, timeparts[TP_DAY]); break; case 'j': // day of year ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]); break; case 'H': // 24-hour ptr = int2str(ptr, item.length, timeparts[TP_HOUR]); break; case 'I': // 12-hour { // 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm auto hour = timeparts[TP_HOUR]; if (hour == 0) hour = 12; if (hour > 12) hour -= 12; ptr = int2str(ptr, item.length, hour); break; } case 'M': // minute ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]); break; case 'S': // second ptr = int2str(ptr, item.length, timeparts[TP_SECOND]); break; case 'f': // sub-second { char subsecond_digits[] = "000000000"; // 9 max digits const int digits = [units = units] { if (units == timestamp_units::ms) return 3; if (units == timestamp_units::us) return 6; if (units == timestamp_units::ns) return 9; return 0; }(); int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]); ptr = copy_and_increment(ptr, 
subsecond_digits, item.length); break; } case 'p': // am or pm // 0 = 12am, 12 = 12pm if (timeparts[TP_HOUR] < 12) memcpy(ptr, "AM", 2); else memcpy(ptr, "PM", 2); ptr += 2; break; case 'z': // timezone memcpy(ptr, "+0000", 5); // always UTC ptr += 5; break; case 'Z': memcpy(ptr, "UTC", 3); ptr += 3; break; default: // ignore everything else break; } } return ptr; } __device__ void operator()(size_type idx) { if (d_timestamps.is_null(idx)) return; auto timestamp = d_timestamps.element<T>(idx); int32_t timeparts[TP_ARRAYSIZE] = {0}; dissect_timestamp(timestamp.time_since_epoch().count(), timeparts); // convert to characters format_from_parts(timeparts, d_chars + d_offsets[idx]); } }; // struct dispatch_from_timestamps_fn { template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr> void operator()(column_device_view const& d_timestamps, format_item const* d_format_items, size_type items_count, timestamp_units units, const int32_t* d_offsets, char* d_chars, hipStream_t stream) const { datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars}; thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), d_timestamps.size(), pfn); } template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr> void operator()(column_device_view const&, format_item const*, size_type, timestamp_units, const int32_t*, char* d_chars, hipStream_t stream) const { CUDF_FAIL("Only timestamps type are expected"); } }; } // namespace // std::unique_ptr<column> from_timestamps(column_view const& timestamps, std::string const& format, hipStream_t stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = timestamps.size(); if (strings_count == 0) return make_empty_strings_column(mr, stream); CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); timestamp_units units = cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn()); 
format_compiler compiler(format.c_str(), units); auto d_format_items = compiler.compile_to_device(); auto column = column_device_view::create(timestamps, stream); auto d_column = *column; // copy null mask rmm::device_buffer null_mask = copy_bitmask(timestamps, stream, mr); // Each string will be the same number of bytes which can be determined // directly from the format string. auto d_str_bytes = compiler.template_bytes(); // size in bytes of each string // build offsets column auto offsets_transformer_itr = thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), [d_column, d_str_bytes] __device__(size_type idx) { return (d_column.is_null(idx) ? 0 : d_str_bytes); }); auto offsets_column = make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream); auto offsets_view = offsets_column->view(); auto d_new_offsets = offsets_view.template data<int32_t>(); // build chars column size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count]; auto chars_column = create_chars_child_column(strings_count, timestamps.null_count(), bytes, mr, stream); auto chars_view = chars_column->mutable_view(); auto d_chars = chars_view.template data<char>(); // fill in chars column with timestamps // dispatcher is called to handle the different timestamp types cudf::type_dispatcher(timestamps.type(), dispatch_from_timestamps_fn(), d_column, d_format_items, compiler.items_count(), units, d_new_offsets, d_chars, stream); // return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), timestamps.null_count(), std::move(null_mask), stream, mr); } } // namespace detail // external API std::unique_ptr<column> from_timestamps(column_view const& timestamps, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_timestamps(timestamps, format, hipStream_t{}, mr); } } // namespace strings } // namespace cudf
d00eeebea8dc14a66f4b99d9644d6901a8d2433c.cu
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>

#include <strings/utilities.cuh>

#include <rmm/thrust_rmm_allocator.h>

#include <map>
#include <vector>

namespace cudf {
namespace strings {
namespace detail {
namespace {

/**
 * @brief Units for timestamp conversion.
 * These are defined since there are more than what cudf supports.
 */
enum class timestamp_units {
  years,    ///< precision is years
  months,   ///< precision is months
  days,     ///< precision is days
  hours,    ///< precision is hours
  minutes,  ///< precision is minutes
  seconds,  ///< precision is seconds
  ms,       ///< precision is milliseconds
  us,       ///< precision is microseconds
  ns        ///< precision is nanoseconds
};

// used to index values in a timeparts array
enum timestamp_parse_component {
  TP_YEAR        = 0,
  TP_MONTH       = 1,
  TP_DAY         = 2,
  TP_DAY_OF_YEAR = 3,
  TP_HOUR        = 4,
  TP_MINUTE      = 5,
  TP_SECOND      = 6,
  TP_SUBSECOND   = 7,
  TP_TZ_MINUTES  = 8,
  TP_ARRAYSIZE   = 9
};

enum class format_char_type : int8_t {
  literal,   // literal char type passed through
  specifier  // timestamp format specifier
};

/**
 * @brief Represents a format specifier or literal from a timestamp format string.
 *
 * Created by the format_compiler when parsing a format string.
 */
struct alignas(4) format_item {
  format_char_type item_type;  // specifier or literal indicator
  char value;                  // specifier or literal value
  int8_t length;               // item length in bytes

  static format_item new_specifier(char format_char, int8_t length)
  {
    return format_item{format_char_type::specifier, format_char, length};
  }
  static format_item new_delimiter(char literal)
  {
    return format_item{format_char_type::literal, literal, 1};
  }
};

/**
 * @brief The format_compiler parses a timestamp format string into a vector of
 * format_items.
 *
 * The vector of format_items are used when parsing a string into timestamp
 * components and when formatting a string from timestamp components.
 */
struct format_compiler {
  std::string format;           // strftime/strptime-style pattern, e.g. "%Y-%m-%d"
  std::string template_string;  // one character per output byte the format produces
  timestamp_units units;        // resolution of the timestamp column being converted
  rmm::device_vector<format_item> d_items;  // compiled items, resident in device memory

  // default field width (in characters) for each supported specifier
  std::map<char, int8_t> specifier_lengths = {{'Y', 4},
                                              {'y', 2},
                                              {'m', 2},
                                              {'d', 2},
                                              {'H', 2},
                                              {'I', 2},
                                              {'M', 2},
                                              {'S', 2},
                                              {'f', 6},
                                              {'z', 5},
                                              {'Z', 3},
                                              {'p', 2},
                                              {'j', 3}};

  format_compiler(const char* format, timestamp_units units) : format(format), units(units) {}

  /**
   * @brief Parse the format pattern into format_items and copy them to device memory.
   *
   * Throws (via CUDF_EXPECTS) on an unfinished `%`, an unknown specifier, or a
   * precision digit applied to anything other than `%f` (e.g. `%3f`).
   *
   * @return Device pointer to the compiled items; valid only while this
   *         format_compiler instance is alive (it owns `d_items`).
   */
  format_item const* compile_to_device()
  {
    std::vector<format_item> items;
    const char* str = format.c_str();
    auto length     = format.length();
    while (length > 0) {
      char ch = *str++;
      length--;
      if (ch != '%') {  // anything other than a specifier passes through verbatim
        items.push_back(format_item::new_delimiter(ch));
        template_string.append(1, ch);
        continue;
      }
      CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");

      ch = *str++;
      length--;
      if (ch == '%')  // escaped % char
      {
        items.push_back(format_item::new_delimiter(ch));
        template_string.append(1, ch);
        continue;
      }
      if (ch >= '0' && ch <= '9')  // optional precision digit, e.g. %3f
      {
        CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
        specifier_lengths[*str] = static_cast<int8_t>(ch - '0');
        ch                      = *str++;
        length--;
      }
      CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(),
                   "invalid format specifier: " + std::string(1, ch));

      int8_t spec_length = specifier_lengths[ch];
      items.push_back(format_item::new_specifier(ch, spec_length));
      template_string.append((size_t)spec_length, ch);
    }
    // create program in device memory
    d_items.resize(items.size());
    // Copy synchronously: `items` is a local, pageable host vector that is
    // destroyed when this function returns, and the previous cudaMemcpyAsync
    // (issued on the default stream with no synchronization) could still be
    // reading it after we return.
    CUDA_TRY(cudaMemcpy(
      d_items.data().get(), items.data(), items.size() * sizeof(items[0]), cudaMemcpyHostToDevice));
    return d_items.data().get();
  }

  // these calls are only valid after compile_to_device is called
  size_type template_bytes() const { return static_cast<size_type>(template_string.size()); }
  size_type items_count() const { return static_cast<size_type>(d_items.size()); }
  int8_t subsecond_precision() const { return specifier_lengths.at('f'); }
};

// this parses date/time characters into a timestamp integer
/**
 * @brief Device functor that parses one date/time string into a timestamp
 * integer of type T at the requested resolution.
 *
 * One thread handles one row: parse_into_parts() splits the characters
 * according to the compiled format_items, then timestamp_from_parts() folds
 * the components into a single integer scaled to `units`.
 */
template <typename T>  // timestamp type
struct parse_datetime {
  column_device_view const d_strings;  // strings column to convert
  format_item const* d_format_items;   // compiled format (device memory)
  size_type items_count;               // number of entries in d_format_items
  timestamp_units units;               // resolution of the output timestamps
  int8_t subsecond_precision;          // number of digits read by %f

  // Parse up to `bytes` decimal characters into an integer.
  // Stops at the first non-digit; no sign handling.
  __device__ int32_t str2int(const char* str, size_type bytes)
  {
    const char* ptr = str;
    int32_t value   = 0;
    for (size_type idx = 0; idx < bytes; ++idx) {
      char chr = *ptr++;
      if (chr < '0' || chr > '9') break;
      value = (value * 10) + static_cast<int32_t>(chr - '0');
    }
    return value;
  }

  // Walk the format_items to read the datetime string.
  // Returns 0 if all ok.
  __device__ int parse_into_parts(string_view const& d_string, int32_t* timeparts)
  {
    auto ptr    = d_string.data();
    auto length = d_string.size_bytes();
    for (size_t idx = 0; idx < items_count; ++idx) {
      auto item = d_format_items[idx];
      if (length < item.length) return 1;  // string too short for this field
      if (item.item_type == format_char_type::literal) {
        // static character we'll just skip;
        // consume item.length bytes from string
        ptr += item.length;
        length -= item.length;
        continue;
      }
      // special logic for each specifier
      switch (item.value) {
        case 'Y': timeparts[TP_YEAR] = str2int(ptr, item.length); break;
        // NOTE(review): %y hard-codes the 1900s -- "05" parses as 1905, not
        // 2005; confirm this matches the intended strptime behavior.
        case 'y': timeparts[TP_YEAR] = str2int(ptr, item.length) + 1900; break;
        case 'm': timeparts[TP_MONTH] = str2int(ptr, item.length); break;
        case 'd': timeparts[TP_DAY] = str2int(ptr, item.length); break;
        case 'j': timeparts[TP_DAY_OF_YEAR] = str2int(ptr, item.length); break;
        case 'H':
        case 'I': timeparts[TP_HOUR] = str2int(ptr, item.length); break;
        case 'M': timeparts[TP_MINUTE] = str2int(ptr, item.length); break;
        case 'S': timeparts[TP_SECOND] = str2int(ptr, item.length); break;
        case 'f': timeparts[TP_SUBSECOND] = str2int(ptr, item.length); break;
        case 'p': {
          // adjust a 12-hour (%I) value to 24-hour form
          string_view am_pm(ptr, 2);
          auto hour = timeparts[TP_HOUR];
          if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
            if (hour == 12) hour = 0;
          } else if (hour < 12)
            hour += 12;
          timeparts[TP_HOUR] = hour;
          break;
        }
        case 'z': {
          // sign is inverted so the offset can be added directly when
          // building the timestamp
          int sign = *ptr == '-' ? 1 : -1;  // revert timezone back to UTC
          int hh   = str2int(ptr + 1, 2);
          int mm   = str2int(ptr + 3, 2);
          // ignoring the rest for now
          // item.length has how many chars we should read
          timeparts[TP_TZ_MINUTES] = sign * ((hh * 60) + mm);
          break;
        }
        case 'Z': break;   // skip
        default: return 3; // unrecognized specifier
      }
      ptr += item.length;
      length -= item.length;
    }
    return 0;
  }

  // Fold the parsed components into a single integer scaled to `units`.
  // The day count uses era-based civil-calendar arithmetic (400-year cycles,
  // March-first months) so leap years/centuries need no lookup tables.
  __device__ int64_t timestamp_from_parts(int32_t const* timeparts, timestamp_units units)
  {
    auto year = timeparts[TP_YEAR];
    if (units == timestamp_units::years) return year - 1970;
    auto month = timeparts[TP_MONTH];
    if (units == timestamp_units::months)
      return ((year - 1970) * 12) + (month - 1);  // months are 1-12, need to 0-base it here
    auto day = timeparts[TP_DAY];
    // The months are shifted so that March is the starting month and February
    // (possible leap day in it) is the last month for the linear calculation
    year -= (month <= 2) ? 1 : 0;
    // date cycle repeats every 400 years (era)
    constexpr int32_t erasInDays  = 146097;
    constexpr int32_t erasInYears = (erasInDays / 365);
    auto era = (year >= 0 ? year : year - 399) / erasInYears;
    auto yoe = year - era * erasInYears;  // year-of-era
    auto doy = month == 0 ? day : ((153 * (month + (month > 2 ? -3 : 9)) + 2) / 5 + day - 1);
    auto doe = (yoe * 365) + (yoe / 4) - (yoe / 100) + doy;  // day-of-era
    int32_t days =
      (era * erasInDays) + doe - 719468;  // 719468 = days from 0000-00-00 to 1970-03-01
    if (units == timestamp_units::days) return days;

    auto tzadjust = timeparts[TP_TZ_MINUTES];  // in minutes
    auto hour     = timeparts[TP_HOUR];
    if (units == timestamp_units::hours) return (days * 24L) + hour + (tzadjust / 60);

    auto minute = timeparts[TP_MINUTE];
    if (units == timestamp_units::minutes)
      return static_cast<int64_t>(days * 24L * 60L) + (hour * 60L) + minute + tzadjust;

    auto second = timeparts[TP_SECOND];
    int64_t timestamp =
      (days * 24L * 3600L) + (hour * 3600L) + (minute * 60L) + second + (tzadjust * 60);
    if (units == timestamp_units::seconds) return timestamp;

    // scale the parsed %f digits up to nanoseconds, then down to `units`
    int64_t powers_of_ten[] = {
      1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
    int64_t subsecond =
      timeparts[TP_SUBSECOND] * powers_of_ten[9 - subsecond_precision];  // normalize to nanoseconds
    if (units == timestamp_units::ms) {
      timestamp *= 1000L;
      subsecond = subsecond / 1000000L;
    } else if (units == timestamp_units::us) {
      timestamp *= 1000000L;
      subsecond = subsecond / 1000L;
    } else if (units == timestamp_units::ns)
      timestamp *= 1000000000L;
    timestamp += subsecond;
    return timestamp;
  }

  // Row operator: null, empty, or unparseable strings become timestamp 0.
  __device__ T operator()(size_type idx)
  {
    if (d_strings.is_null(idx)) return 0;
    string_view d_str = d_strings.element<string_view>(idx);
    if (d_str.empty()) return 0;
    //
    int32_t timeparts[TP_ARRAYSIZE] = {0, 1, 1};  // month and day are 1-based
    if (parse_into_parts(d_str, timeparts)) return 0;  // unexpected parse case
    //
    return static_cast<T>(timestamp_from_parts(timeparts, units));
  }
};

// convert cudf type to timestamp units
struct dispatch_timestamp_to_units_fn {
  template <typename T>
  timestamp_units operator()()
  {
    CUDF_FAIL("Invalid type for timestamp conversion.");
  }
};
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_D>()
{
  return timestamp_units::days;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_s>()
{
  return timestamp_units::seconds;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ms>()
{
  return timestamp_units::ms;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_us>()
{
  return timestamp_units::us;
}
template <>
timestamp_units dispatch_timestamp_to_units_fn::operator()<cudf::timestamp_ns>()
{
  return timestamp_units::ns;
}

// dispatch operator to map timestamp to native fixed-width-type
struct dispatch_to_timestamps_fn {
  // compile the format, then run one parse_datetime<T> per row via thrust
  template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
  void operator()(column_device_view const& d_strings,
                  std::string const& format,
                  timestamp_units units,
                  mutable_column_view& results_view,
                  cudaStream_t stream) const
  {
    format_compiler compiler(format.c_str(), units);
    auto d_items   = compiler.compile_to_device();
    auto d_results = results_view.data<T>();
    parse_datetime<T> pfn{
      d_strings, d_items, compiler.items_count(), units, compiler.subsecond_precision()};
    thrust::transform(rmm::exec_policy(stream)->on(stream),
                      thrust::make_counting_iterator<size_type>(0),
                      thrust::make_counting_iterator<size_type>(results_view.size()),
                      d_results,
                      pfn);
  }
  // non-timestamp output types are a caller error
  template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
  void operator()(column_device_view const&,
                  std::string const&,
                  timestamp_units,
                  mutable_column_view&,
                  cudaStream_t) const
  {
    CUDF_FAIL("Only timestamps type are expected");
  }
};

}  // namespace

// Convert a strings column into a timestamp column of `timestamp_type`
// using `format`; the null mask/count are copied from the input column.
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
                                            data_type timestamp_type,
                                            std::string const& format,
                                            cudaStream_t stream,
                                            rmm::mr::device_memory_resource* mr)
{
  size_type strings_count = strings.size();
  if (strings_count == 0) return make_timestamp_column(timestamp_type, 0);
  CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
  timestamp_units units = cudf::type_dispatcher(timestamp_type, dispatch_timestamp_to_units_fn());

  auto strings_column = column_device_view::create(strings.parent(), stream);
  auto d_column       = *strings_column;

  auto results = make_timestamp_column(timestamp_type,
                                       strings_count,
                                       copy_bitmask(strings.parent(), stream, mr),
                                       strings.null_count(),
                                       stream,
                                       mr);
  auto results_view = results->mutable_view();
  // dispatch on the output type to instantiate the right parse_datetime<T>
  cudf::type_dispatcher(
    timestamp_type, dispatch_to_timestamps_fn(), d_column, format, units, results_view, stream);
  results->set_null_count(strings.null_count());
  return results;
}

}  // namespace detail

// external API
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& strings,
                                            data_type timestamp_type,
                                            std::string const& format,
                                            rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::to_timestamps(strings, timestamp_type, format, cudaStream_t{}, mr);
}

namespace detail {
namespace {

// converts a timestamp into date-time string
template <typename T>
struct datetime_formatter {
  const column_device_view d_timestamps;  // timestamps to format
  const format_item* d_format_items;      // compiled format (device memory)
  size_type items_count;                  // number of format items
  timestamp_units units;                  // resolution of the input timestamps
  const int32_t* d_offsets;               // output byte offset of each row
  char* d_chars;                          // output character buffer

  // divide timestamp integer into time components (year, month, day, etc)
  // TODO call the simt::std::chrono methods here instead when the are ready
  __device__ void dissect_timestamp(int64_t timestamp, int32_t* timeparts)
  {
    if (units == timestamp_units::years) {
      timeparts[TP_YEAR]  = static_cast<int32_t>(timestamp) + 1970;
      timeparts[TP_MONTH] = 1;
      timeparts[TP_DAY]   = 1;
      return;
    }
    if (units == timestamp_units::months) {
      int32_t month       = static_cast<int32_t>(timestamp % 12);
      int32_t year        = static_cast<int32_t>(timestamp / 12) + 1970;
      timeparts[TP_YEAR]  = year;
      timeparts[TP_MONTH] = month + 1;  // months start at 1 and not 0
      timeparts[TP_DAY]   = 1;
      return;
    }
    // first, convert to days so we can handle months, leap years, etc.
// (continuation of datetime_formatter<T>::dissect_timestamp)
    int32_t days = static_cast<int32_t>(timestamp);  // default to days
    if (units == timestamp_units::hours)
      days = static_cast<int32_t>(timestamp / 24L);
    else if (units == timestamp_units::minutes)
      days = static_cast<int32_t>(timestamp / 1440L);  // 24*60
    else if (units == timestamp_units::seconds)
      days = static_cast<int32_t>(timestamp / 86400L);  // 24*60*60
    else if (units == timestamp_units::ms)
      days = static_cast<int32_t>(timestamp / 86400000L);
    else if (units == timestamp_units::us)
      days = static_cast<int32_t>(timestamp / 86400000000L);
    else if (units == timestamp_units::ns)
      days = static_cast<int32_t>(timestamp / 86400000000000L);
    days = days + 719468;  // 719468 is days between 0000-00-00 and 1970-01-01
    constexpr int32_t daysInEra     = 146097;  // (400*365)+97
    constexpr int32_t daysInCentury = 36524;   // (100*365) + 24;
    constexpr int32_t daysIn4Years  = 1461;    // (4*365) + 1;
    constexpr int32_t daysInYear    = 365;
    // The months are shifted so that March is the starting month and February
    // (with possible leap day in it) is the last month for the linear calculation.
    // Day offsets for each month:      Mar Apr May June July Aug  Sep  Oct  Nov  Dec  Jan  Feb
    const int32_t monthDayOffset[] = {0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337, 366};

    // code logic handles leap years in chunks: 400y,100y,4y,1y
    int32_t year  = 400 * (days / daysInEra);
    days          = days % daysInEra;
    int32_t leapy = days / daysInCentury;
    days          = days % daysInCentury;
    if (leapy == 4) {  // landed exactly on a leap century
      days += daysInCentury;
      --leapy;
    }
    year += 100 * leapy;
    year += 4 * (days / daysIn4Years);
    days  = days % daysIn4Years;
    leapy = days / daysInYear;
    days  = days % daysInYear;
    if (leapy == 4) {  // landed exactly on a leap year
      days += daysInYear;
      --leapy;
    }
    year += leapy;

    //
    int32_t month = 12;
    for (int32_t idx = 0; idx < month; ++idx) {  // find the month
      if (days < monthDayOffset[idx + 1]) {
        month = idx;
        break;
      }
    }
    // compute day of the year and account for calculating with March being the first month
    // for month >= 10, leap-day has been already been included
    timeparts[TP_DAY_OF_YEAR] =
      (month >= 10) ? days - monthDayOffset[10] + 1
                    : days + /*Jan=*/31 + /*Feb=*/28 + 1 +  // 2-month shift
                        ((year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)));
    int32_t day = days - monthDayOffset[month] + 1;  // compute day of month
    if (month >= 10) ++year;
    month = ((month + 2) % 12) + 1;  // adjust Jan-Mar offset

    timeparts[TP_YEAR]  = year;
    timeparts[TP_MONTH] = month;
    timeparts[TP_DAY]   = day;
    if (units == timestamp_units::days) return;  // done with date

    // now work on time; hour/minute/second each start at the raw value and
    // are divided down to their own resolution as we pass each unit branch
    int64_t hour = timestamp, minute = timestamp, second = timestamp;
    if (units == timestamp_units::hours) {
      timeparts[TP_HOUR] = static_cast<int32_t>(hour % 24);
      return;
    }
    hour = hour / 60;
    if (units == timestamp_units::minutes) {
      timeparts[TP_HOUR]   = static_cast<int32_t>(hour % 24);
      timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60);
      return;
    }
    hour   = hour / 60;
    minute = minute / 60;
    if (units == timestamp_units::seconds) {
      timeparts[TP_HOUR]   = static_cast<int32_t>(hour % 24);
      timeparts[TP_MINUTE] = static_cast<int32_t>(minute % 60);
      timeparts[TP_SECOND] = static_cast<int32_t>(second % 60);
      return;
    }
    hour   = hour / 1000;
    minute = minute / 1000;
    second = second / 1000;
    if (units == timestamp_units::ms) {
      timeparts[TP_HOUR]      = static_cast<int32_t>(hour % 24);
      timeparts[TP_MINUTE]    = static_cast<int32_t>(minute % 60);
      timeparts[TP_SECOND]    = static_cast<int32_t>(second % 60);
      timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000);
      return;
    }
    hour   = hour / 1000;
    minute = minute / 1000;
    second = second / 1000;
    if (units == timestamp_units::us) {
      timeparts[TP_HOUR]      = static_cast<int32_t>(hour % 24);
      timeparts[TP_MINUTE]    = static_cast<int32_t>(minute % 60);
      timeparts[TP_SECOND]    = static_cast<int32_t>(second % 60);
      timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000000);
      return;
    }
    hour   = hour / 1000;
    minute = minute / 1000;
    second = second / 1000;
    timeparts[TP_HOUR]      = static_cast<int32_t>(hour % 24);
    timeparts[TP_MINUTE]    = static_cast<int32_t>(minute % 60);
    timeparts[TP_SECOND]    = static_cast<int32_t>(second % 60);
    timeparts[TP_SUBSECOND] = static_cast<int32_t>(timestamp % 1000000000);
  }

  // utility to create 0-padded integers (up to 9 chars)
  __device__ char* int2str(char* str, int bytes, int val)
  {
    char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
    char* ptr    = tmpl;
    // digits accumulate least-significant first ...
    while (val > 0) {
      int digit = val % 10;
      *ptr++    = '0' + digit;
      val       = val / 10;
    }
    // ... and are written out in reverse to produce the padded number
    ptr = tmpl + bytes - 1;
    while (bytes-- > 0) *str++ = *ptr--;
    return str;
  }

  // write the formatted date-time for `timeparts` into the buffer at `ptr`
  __device__ char* format_from_parts(int32_t const* timeparts, char* ptr)
  {
    for (size_t idx = 0; idx < items_count; ++idx) {
      auto item = d_format_items[idx];
      if (item.item_type == format_char_type::literal) {
        *ptr++ = item.value;
        continue;
      }
      // special logic for each specifier
      switch (item.value) {
        case 'Y':  // 4-digit year
          ptr = int2str(ptr, item.length, timeparts[TP_YEAR]);
          break;
        case 'y':  // 2-digit year
          ptr = int2str(ptr, item.length, timeparts[TP_YEAR] - 1900);
          break;
        case 'm':  // month
          ptr = int2str(ptr, item.length, timeparts[TP_MONTH]);
          break;
        case 'd':  // day of month
          ptr = int2str(ptr, item.length, timeparts[TP_DAY]);
          break;
        case 'j':  // day of year
          ptr = int2str(ptr, item.length, timeparts[TP_DAY_OF_YEAR]);
          break;
        case 'H':  // 24-hour
          ptr = int2str(ptr, item.length, timeparts[TP_HOUR]);
          break;
        case 'I':  // 12-hour
        {
          // 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
          auto hour = timeparts[TP_HOUR];
          if (hour == 0) hour = 12;
          if (hour > 12) hour -= 12;
          ptr = int2str(ptr, item.length, hour);
          break;
        }
        case 'M':  // minute
          ptr = int2str(ptr, item.length, timeparts[TP_MINUTE]);
          break;
        case 'S':  // second
          ptr = int2str(ptr, item.length, timeparts[TP_SECOND]);
          break;
        case 'f':  // sub-second
        {
          char subsecond_digits[] = "000000000";  // 9 max digits
          const int digits        = [units = units] {
            if (units == timestamp_units::ms) return 3;
            if (units == timestamp_units::us) return 6;
            if (units == timestamp_units::ns) return 9;
            return 0;
          }();
          int2str(subsecond_digits, digits, timeparts[TP_SUBSECOND]);
          ptr = copy_and_increment(ptr, subsecond_digits, item.length);
          break;
        }
        case 'p':  // am or pm
          // 0 = 12am, 12 = 12pm
          if (timeparts[TP_HOUR] < 12)
            memcpy(ptr, "AM", 2);
          else
            memcpy(ptr, "PM", 2);
          ptr += 2;
          break;
        case 'z':  // timezone
          memcpy(ptr, "+0000", 5);  // always UTC
          ptr += 5;
          break;
        case 'Z':
          memcpy(ptr, "UTC", 3);
          ptr += 3;
          break;
        default:  // ignore everything else
          break;
      }
    }
    return ptr;
  }

  // Row operator: format one timestamp; null rows write nothing.
  __device__ void operator()(size_type idx)
  {
    if (d_timestamps.is_null(idx)) return;
    auto timestamp                  = d_timestamps.element<T>(idx);
    int32_t timeparts[TP_ARRAYSIZE] = {0};
    dissect_timestamp(timestamp.time_since_epoch().count(), timeparts);
    // convert to characters
    format_from_parts(timeparts, d_chars + d_offsets[idx]);
  }
};

// dispatch operator: instantiate datetime_formatter<T> for the column's type
struct dispatch_from_timestamps_fn {
  template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
  void operator()(column_device_view const& d_timestamps,
                  format_item const* d_format_items,
                  size_type items_count,
                  timestamp_units units,
                  const int32_t* d_offsets,
                  char* d_chars,
                  cudaStream_t stream) const
  {
    datetime_formatter<T> pfn{d_timestamps, d_format_items, items_count, units, d_offsets, d_chars};
    thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
                       thrust::make_counting_iterator<cudf::size_type>(0),
                       d_timestamps.size(),
                       pfn);
  }
  // non-timestamp input types are a caller error
  template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
  void operator()(column_device_view const&,
                  format_item const*,
                  size_type,
                  timestamp_units,
                  const int32_t*,
                  char* d_chars,
                  cudaStream_t stream) const
  {
    CUDF_FAIL("Only timestamps type are expected");
  }
};

}  // namespace

// Convert a timestamp column into a strings column using `format`.
// Every non-null output row has the same byte length, derived from the format.
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
                                        std::string const& format,
                                        cudaStream_t stream,
                                        rmm::mr::device_memory_resource* mr)
{
  size_type strings_count = timestamps.size();
  if (strings_count == 0) return make_empty_strings_column(mr, stream);
  CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
  timestamp_units units = cudf::type_dispatcher(timestamps.type(), dispatch_timestamp_to_units_fn());

  format_compiler compiler(format.c_str(), units);
  auto d_format_items = compiler.compile_to_device();

  auto column   = column_device_view::create(timestamps, stream);
  auto d_column = *column;

  // copy null mask
  rmm::device_buffer null_mask = copy_bitmask(timestamps, stream, mr);
  // Each string will be the same number of bytes which can be determined
  // directly from the format string.
  auto d_str_bytes = compiler.template_bytes();  // size in bytes of each string
  // build offsets column
  auto offsets_transformer_itr =
    thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
                                    [d_column, d_str_bytes] __device__(size_type idx) {
                                      return (d_column.is_null(idx) ? 0 : d_str_bytes);
                                    });
  auto offsets_column = make_offsets_child_column(
    offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
  auto offsets_view  = offsets_column->view();
  auto d_new_offsets = offsets_view.template data<int32_t>();

  // build chars column
  size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
  auto chars_column =
    create_chars_child_column(strings_count, timestamps.null_count(), bytes, mr, stream);
  auto chars_view = chars_column->mutable_view();
  auto d_chars    = chars_view.template data<char>();
  // fill in chars column with timestamps
  // dispatcher is called to handle the different timestamp types
  cudf::type_dispatcher(timestamps.type(),
                        dispatch_from_timestamps_fn(),
                        d_column,
                        d_format_items,
                        compiler.items_count(),
                        units,
                        d_new_offsets,
                        d_chars,
                        stream);
  //
  return make_strings_column(strings_count,
                             std::move(offsets_column),
                             std::move(chars_column),
                             timestamps.null_count(),
                             std::move(null_mask),
                             stream,
                             mr);
}

}  // namespace detail

// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
                                        std::string const& format,
                                        rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::from_timestamps(timestamps, format, cudaStream_t{}, mr);
}

}  // namespace strings
}  // namespace cudf
736faabf7b285d2e817615c201883bd957ffdb2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = 
tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(hipGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(hipGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(hipGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(hipGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(hipGetLastError()); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < 
THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, 
unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); THLongStorage *indexDims = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, src, indexDims), 3, "Index tensor must have the same size as input tensor."); THLongStorage_free(indexDims); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { 
THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); 
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #endif
736faabf7b285d2e817615c201883bd957ffdb2e.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && 
THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(cudaGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(cudaGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(cudaGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(cudaGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(cudaGetLastError()); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, 
src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); THLongStorage *indexDims = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, src, indexDims), 3, "Index tensor must have the same size as input tensor."); THLongStorage_free(indexDims); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 4, "Input tensor must 
have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, 
totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #endif
236922a448e3bcae5753363b26c921dc1acdb197.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 8192 #define TILE 32 #define SIZE N*N __global__ void transpose_gpu(double *b, const double *a, const int size) { int x = blockIdx.x * TILE + threadIdx.x; int y = blockIdx.y * TILE + threadIdx.y; int width = gridDim.x * TILE; for (int i = 0; i < TILE; i+= size) b[x*width + (y + i)] = a[(y + i) * width + x]; } __global__ void transpose_gpu_opt(double *b, const double *a, const int size) { __shared__ double tmp[TILE][TILE]; int x = blockIdx.x * TILE + threadIdx.x; int y = blockIdx.y * TILE + threadIdx.y; int id_a = x + (y) * N; x = blockIdx.y * TILE + threadIdx.x; y = blockIdx.x * TILE + threadIdx.y; int id_b = x + (y) * N; for (int i = 0; i < TILE; i+=size) { tmp[threadIdx.y + i][threadIdx.x] = a[id_a + i * N]; } __syncthreads(); for (int i = 0; i < TILE; i+=size) { b[id_b + i * N] = tmp[threadIdx.x][threadIdx.y + i]; } } void fill_matrix (double* a, const int dim) { for(int i = 0; i < dim; i++) { a[i] = (double) i; } } int main(int argc, char const *argv[]) { dim3 grid, block; block.x = TILE; block.y = atoi(argv[1]); grid.x = N / TILE; grid.y = N / TILE; double* host_input, * host_output; double* gpu_input, * gpu_output; host_input = (double* )malloc(SIZE * sizeof(double)); host_output = (double* )malloc(SIZE * sizeof(double)); hipMalloc((void**)&gpu_input, SIZE * sizeof(double)); hipMalloc((void**)&gpu_output, SIZE * sizeof(double)); char sep[] = "---------------------------------------------\n"; printf("%sTHREADS x BLOCK: %d\n%s", sep, atoi(argv[1]), sep); //TRANSPOSE. 
fill_matrix(host_input, SIZE); hipMemcpy(gpu_input, host_input, SIZE, hipMemcpyHostToDevice); float total_time, total_time_opt = 0.0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( transpose_gpu), dim3(grid),dim3(block), 0, 0, gpu_input, gpu_output, block.y); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); double bandwidth = 2. * (double)SIZE * sizeof(double) / total_time / 1e6; printf("TRANSPOSE:\t\t %fms\n", total_time); printf("BANDWIDTH:\t\t %fms\n%s", bandwidth, sep); hipMemcpy(host_output, gpu_output, SIZE, hipMemcpyDeviceToHost); // TRANSPOSE OPT. fill_matrix(host_input, SIZE); hipMemcpy(gpu_input, host_input, SIZE, hipMemcpyHostToDevice); hipEventRecord(start,0); hipLaunchKernelGGL(( transpose_gpu_opt), dim3(grid), dim3(block), 0, 0, gpu_input, gpu_output, block.y); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time_opt, start, stop); double bandwidth_opt = 2. * (double)SIZE * sizeof(double) / total_time_opt / 1e6; printf("TRANSPOSE OPT:\t\t %fms\n", total_time_opt); printf("BANDWIDTH:\t\t %fms\n%s\n", bandwidth_opt, sep); hipMemcpy(host_output, gpu_output, SIZE, hipMemcpyDeviceToHost); free(host_input); free(host_output); hipFree(gpu_input); hipFree(gpu_output); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
236922a448e3bcae5753363b26c921dc1acdb197.cu
#include <stdio.h> #define N 8192 #define TILE 32 #define SIZE N*N __global__ void transpose_gpu(double *b, const double *a, const int size) { int x = blockIdx.x * TILE + threadIdx.x; int y = blockIdx.y * TILE + threadIdx.y; int width = gridDim.x * TILE; for (int i = 0; i < TILE; i+= size) b[x*width + (y + i)] = a[(y + i) * width + x]; } __global__ void transpose_gpu_opt(double *b, const double *a, const int size) { __shared__ double tmp[TILE][TILE]; int x = blockIdx.x * TILE + threadIdx.x; int y = blockIdx.y * TILE + threadIdx.y; int id_a = x + (y) * N; x = blockIdx.y * TILE + threadIdx.x; y = blockIdx.x * TILE + threadIdx.y; int id_b = x + (y) * N; for (int i = 0; i < TILE; i+=size) { tmp[threadIdx.y + i][threadIdx.x] = a[id_a + i * N]; } __syncthreads(); for (int i = 0; i < TILE; i+=size) { b[id_b + i * N] = tmp[threadIdx.x][threadIdx.y + i]; } } void fill_matrix (double* a, const int dim) { for(int i = 0; i < dim; i++) { a[i] = (double) i; } } int main(int argc, char const *argv[]) { dim3 grid, block; block.x = TILE; block.y = atoi(argv[1]); grid.x = N / TILE; grid.y = N / TILE; double* host_input, * host_output; double* gpu_input, * gpu_output; host_input = (double* )malloc(SIZE * sizeof(double)); host_output = (double* )malloc(SIZE * sizeof(double)); cudaMalloc((void**)&gpu_input, SIZE * sizeof(double)); cudaMalloc((void**)&gpu_output, SIZE * sizeof(double)); char sep[] = "---------------------------------------------\n"; printf("%sTHREADS x BLOCK: %d\n%s", sep, atoi(argv[1]), sep); //TRANSPOSE. fill_matrix(host_input, SIZE); cudaMemcpy(gpu_input, host_input, SIZE, cudaMemcpyHostToDevice); float total_time, total_time_opt = 0.0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); transpose_gpu<<<grid,block>>>(gpu_input, gpu_output, block.y); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); double bandwidth = 2. 
* (double)SIZE * sizeof(double) / total_time / 1e6; printf("TRANSPOSE:\t\t %fms\n", total_time); printf("BANDWIDTH:\t\t %fms\n%s", bandwidth, sep); cudaMemcpy(host_output, gpu_output, SIZE, cudaMemcpyDeviceToHost); // TRANSPOSE OPT. fill_matrix(host_input, SIZE); cudaMemcpy(gpu_input, host_input, SIZE, cudaMemcpyHostToDevice); cudaEventRecord(start,0); transpose_gpu_opt<<<grid, block>>>(gpu_input, gpu_output, block.y); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time_opt, start, stop); double bandwidth_opt = 2. * (double)SIZE * sizeof(double) / total_time_opt / 1e6; printf("TRANSPOSE OPT:\t\t %fms\n", total_time_opt); printf("BANDWIDTH:\t\t %fms\n%s\n", bandwidth_opt, sep); cudaMemcpy(host_output, gpu_output, SIZE, cudaMemcpyDeviceToHost); free(host_input); free(host_output); cudaFree(gpu_input); cudaFree(gpu_output); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
f73c9f25e9f4df452e6303d5e86172e79430600e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/generate.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Function> __global__ void generate_kernel(Iterator first, Iterator last, Function f) { thrust::generate(thrust::seq, first, last, f); } template<typename T> struct return_value { T val; return_value(void){} return_value(T v):val(v){} __host__ __device__ T operator()(void){ return val; } }; template<typename T> void TestGenerateDeviceSeq(const size_t n) { thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); T value = 13; return_value<T> f(value); thrust::generate(h_result.begin(), h_result.end(), f); hipLaunchKernelGGL(( generate_kernel), dim3(1),dim3(1), 0, 0, d_result.begin(), d_result.end(), f); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestGenerateDeviceSeq); template<typename Iterator, typename Size, typename Function> __global__ void generate_n_kernel(Iterator first, Size n, Function f) { thrust::generate_n(thrust::seq, first, n, f); } template<typename T> void TestGenerateNDeviceSeq(const size_t n) { thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); T value = 13; return_value<T> f(value); thrust::generate_n(h_result.begin(), h_result.size(), f); hipLaunchKernelGGL(( generate_n_kernel), dim3(1),dim3(1), 0, 0, d_result.begin(), d_result.size(), f); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestGenerateNDeviceSeq);
f73c9f25e9f4df452e6303d5e86172e79430600e.cu
#include <unittest/unittest.h> #include <thrust/generate.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Function> __global__ void generate_kernel(Iterator first, Iterator last, Function f) { thrust::generate(thrust::seq, first, last, f); } template<typename T> struct return_value { T val; return_value(void){} return_value(T v):val(v){} __host__ __device__ T operator()(void){ return val; } }; template<typename T> void TestGenerateDeviceSeq(const size_t n) { thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); T value = 13; return_value<T> f(value); thrust::generate(h_result.begin(), h_result.end(), f); generate_kernel<<<1,1>>>(d_result.begin(), d_result.end(), f); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestGenerateDeviceSeq); template<typename Iterator, typename Size, typename Function> __global__ void generate_n_kernel(Iterator first, Size n, Function f) { thrust::generate_n(thrust::seq, first, n, f); } template<typename T> void TestGenerateNDeviceSeq(const size_t n) { thrust::host_vector<T> h_result(n); thrust::device_vector<T> d_result(n); T value = 13; return_value<T> f(value); thrust::generate_n(h_result.begin(), h_result.size(), f); generate_n_kernel<<<1,1>>>(d_result.begin(), d_result.size(), f); ASSERT_EQUAL(h_result, d_result); } DECLARE_VARIABLE_UNITTEST(TestGenerateNDeviceSeq);
d6eb33badc089c42e5687e5b30f8fc6f7bb8f921.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Created By: Sandeep Katragadda // https://github.com/ksandeep4u/CUDA-examples // Modified version of https://github.com/pwlnk/cuda-neural-network #pragma once #include "linearLayer.cuh" #include <assert.h> #include <random> __global__ void linearLayerForward(float* W, float* A, float* b, float* Z, int Wx, int Wy, int Ax, int Ay) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int Zx = Ax; int Zy = Wy; for (int row = rIdx; row < Zx; row+=stride_r) { for (int col = cIdx; col < Zy; col+=stride_c) { float Z_tmp = 0.0f; for (int i = 0; i < Wx; i++) Z_tmp += A[row * Ay + i] * W[i * Wy + col]; Z[row * Zy + col] = Z_tmp + b[col]; } } } __global__ void linearLayerBackProp(float* W, float* dZ, float* dA, int Wx, int Wy, int dZx, int dZy) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int dAx = dZx; int dAy = Wx; for (int row = rIdx; row < dAx; row += stride_r) { for (int col = cIdx; col < dAy; col += stride_c) { float dA_tmp = 0.0f; for (int i = 0; i < Wy; i++) dA_tmp += dZ[row * dZy + i] * W[col * Wy + i]; dA[row * dAy + col] = dA_tmp; } } } __global__ void linearLayerUpdateBias(float* dZ, float* b, int dZx, int dZy, int bx, float learning_rate) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int index = idx; index < dZx*dZy; index += stride) { int col = index % dZy; int row = index / dZy; atomicAdd(&b[col], -learning_rate * (dZ[row * dZy + col] / dZx)); } } __global__ void linearLayerUpdateWeights(float* dZ, float* A, float* W, int dZx, int dZy, int Ax, int Ay, float learning_rate) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + 
threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int Wx = Ay; int Wy = dZy; for (int row = rIdx; row < Wx; row += stride_r) { for (int col = cIdx; col < Wy; col += stride_c) { float dW_tmp = 0.0f; for (int i = 0; i < dZx; i++) dW_tmp += A[i * Ay + row] * dZ[i * dZy + col]; W[row * Wy + col] -= (float)(learning_rate * (dW_tmp / Ax)); } } } LinearLayer::LinearLayer(std::string name, Shape shape, bool useGPU): W(shape), b(shape.y,1) { this->name = name; this->useGPU = useGPU; b.allocateMemory(); W.allocateMemory(); initBiasWithZeros(); initWeightsRandomly(); } LinearLayer::~LinearLayer(){} void LinearLayer::initBiasWithZeros() { for (int i = 0; i < b.shape.x; i++) b[i] = 0.0f; //b.copyHostToDevice(); } void LinearLayer::initWeightsRandomly() { std::default_random_engine generator; std::normal_distribution<float> normal_distribution; for (int i = 0; i < W.shape.x; i++) for (int j = 0; j < W.shape.y; j++) W[i * W.shape.y + j] = normal_distribution(generator) * weights_init_threshold; //W.copyHostToDevice(); } // Z = AW+b Matrix& LinearLayer::forward(Matrix A) { assert(W.shape.x == A.shape.y); this->A = A; int Zx = A.shape.x; int Zy = W.shape.y; Z.allocateMemoryIfNotAllocated(Shape(A.shape.x, W.shape.y)); if (useGPU) { dim3 num_threads(256, 256); dim3 num_blocks((Zx + num_threads.x - 1) / num_threads.x, (Zy + num_threads.y - 1) / num_threads.y); W.copyHostToDevice(); A.copyHostToDevice(); b.copyHostToDevice(); linearLayerForward << <num_blocks, num_threads >> > (W.data_device.get(), A.data_device.get(), b.data_device.get(), Z.data_device.get(), W.shape.x, W.shape.y, A.shape.x, A.shape.y); hipDeviceSynchronize(); Z.copyDeviceToHost(); } else { Z = A * W; for (int row = 0; row < Zx; row++) for (int col = 0; col < Zy; col++) Z[row * Zy + col] += b[col]; } return Z; } // dA = dZ*transpose(W) Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate) { dA.allocateMemoryIfNotAllocated(A.shape); assert(dA.shape.x == dZ.shape.x); 
assert(dA.shape.y == W.shape.x); if (useGPU) { dim3 num_threads(256, 256); dim3 num_blocks((A.shape.x + num_threads.x - 1) / num_threads.x, (A.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerBackProp << <num_blocks, num_threads >> > (W.data_device.get(), dZ.data_device.get(), dA.data_device.get(), W.shape.x, W.shape.y, dZ.shape.x, dZ.shape.y); hipDeviceSynchronize(); dA.copyDeviceToHost(); } else dA = dZ * W.transpose(); updateBias(dZ, learning_rate); updateWeights(dZ, learning_rate); return dA; } void LinearLayer::updateBias(Matrix& dZ, float learning_rate) { if (useGPU) { dim3 num_threads(256); dim3 num_blocks((dZ.shape.x + num_threads.x - 1) / num_threads.x, (dZ.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerUpdateBias << <num_blocks, num_threads >> > (dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, b.shape.x, learning_rate); hipDeviceSynchronize(); b.copyDeviceToHost(); } else { for (int index = 0; index < dZ.shape.x * dZ.shape.y; index++) { int col = index % dZ.shape.y; int row = index / dZ.shape.y; b[col] -= learning_rate * (dZ[row * dZ.shape.y + col] / dZ.shape.x); } } } void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) { assert(W.shape.x == A.shape.y); assert(W.shape.y == dZ.shape.y); if (useGPU) { dim3 num_threads(256, 256); dim3 num_blocks((W.shape.x + num_threads.x - 1) / num_threads.x, (W.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerUpdateWeights << <num_blocks, num_threads >> > (dZ.data_device.get(), A.data_device.get(), W.data_device.get(), dZ.shape.x, dZ.shape.y, A.shape.x, A.shape.y, learning_rate); hipDeviceSynchronize(); W.copyDeviceToHost(); } else { Matrix dW = A.transpose() * dZ; for (int row = 0; row < W.shape.x; row++) { for (int col = 0; col < W.shape.y; col++) { W[row * W.shape.y + col] -= (float)(learning_rate * (dW[row * dW.shape.y + col] / A.shape.x)); } } } }
d6eb33badc089c42e5687e5b30f8fc6f7bb8f921.cu
// Created By: Sandeep Katragadda // https://github.com/ksandeep4u/CUDA-examples // Modified version of https://github.com/pwlnk/cuda-neural-network #pragma once #include "linearLayer.cuh" #include <assert.h> #include <random> __global__ void linearLayerForward(float* W, float* A, float* b, float* Z, int Wx, int Wy, int Ax, int Ay) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int Zx = Ax; int Zy = Wy; for (int row = rIdx; row < Zx; row+=stride_r) { for (int col = cIdx; col < Zy; col+=stride_c) { float Z_tmp = 0.0f; for (int i = 0; i < Wx; i++) Z_tmp += A[row * Ay + i] * W[i * Wy + col]; Z[row * Zy + col] = Z_tmp + b[col]; } } } __global__ void linearLayerBackProp(float* W, float* dZ, float* dA, int Wx, int Wy, int dZx, int dZy) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int dAx = dZx; int dAy = Wx; for (int row = rIdx; row < dAx; row += stride_r) { for (int col = cIdx; col < dAy; col += stride_c) { float dA_tmp = 0.0f; for (int i = 0; i < Wy; i++) dA_tmp += dZ[row * dZy + i] * W[col * Wy + i]; dA[row * dAy + col] = dA_tmp; } } } __global__ void linearLayerUpdateBias(float* dZ, float* b, int dZx, int dZy, int bx, float learning_rate) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int index = idx; index < dZx*dZy; index += stride) { int col = index % dZy; int row = index / dZy; atomicAdd(&b[col], -learning_rate * (dZ[row * dZy + col] / dZx)); } } __global__ void linearLayerUpdateWeights(float* dZ, float* A, float* W, int dZx, int dZy, int Ax, int Ay, float learning_rate) { int rIdx = blockDim.x * blockIdx.x + threadIdx.x; int cIdx = blockDim.y * blockIdx.y + threadIdx.y; int stride_r = gridDim.x * blockDim.x; int stride_c = gridDim.y * blockDim.y; int Wx 
= Ay; int Wy = dZy; for (int row = rIdx; row < Wx; row += stride_r) { for (int col = cIdx; col < Wy; col += stride_c) { float dW_tmp = 0.0f; for (int i = 0; i < dZx; i++) dW_tmp += A[i * Ay + row] * dZ[i * dZy + col]; W[row * Wy + col] -= (float)(learning_rate * (dW_tmp / Ax)); } } } LinearLayer::LinearLayer(std::string name, Shape shape, bool useGPU): W(shape), b(shape.y,1) { this->name = name; this->useGPU = useGPU; b.allocateMemory(); W.allocateMemory(); initBiasWithZeros(); initWeightsRandomly(); } LinearLayer::~LinearLayer(){} void LinearLayer::initBiasWithZeros() { for (int i = 0; i < b.shape.x; i++) b[i] = 0.0f; //b.copyHostToDevice(); } void LinearLayer::initWeightsRandomly() { std::default_random_engine generator; std::normal_distribution<float> normal_distribution; for (int i = 0; i < W.shape.x; i++) for (int j = 0; j < W.shape.y; j++) W[i * W.shape.y + j] = normal_distribution(generator) * weights_init_threshold; //W.copyHostToDevice(); } // Z = AW+b Matrix& LinearLayer::forward(Matrix A) { assert(W.shape.x == A.shape.y); this->A = A; int Zx = A.shape.x; int Zy = W.shape.y; Z.allocateMemoryIfNotAllocated(Shape(A.shape.x, W.shape.y)); if (useGPU) { dim3 num_threads(256, 256); dim3 num_blocks((Zx + num_threads.x - 1) / num_threads.x, (Zy + num_threads.y - 1) / num_threads.y); W.copyHostToDevice(); A.copyHostToDevice(); b.copyHostToDevice(); linearLayerForward << <num_blocks, num_threads >> > (W.data_device.get(), A.data_device.get(), b.data_device.get(), Z.data_device.get(), W.shape.x, W.shape.y, A.shape.x, A.shape.y); cudaDeviceSynchronize(); Z.copyDeviceToHost(); } else { Z = A * W; for (int row = 0; row < Zx; row++) for (int col = 0; col < Zy; col++) Z[row * Zy + col] += b[col]; } return Z; } // dA = dZ*transpose(W) Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate) { dA.allocateMemoryIfNotAllocated(A.shape); assert(dA.shape.x == dZ.shape.x); assert(dA.shape.y == W.shape.x); if (useGPU) { dim3 num_threads(256, 256); dim3 
num_blocks((A.shape.x + num_threads.x - 1) / num_threads.x, (A.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerBackProp << <num_blocks, num_threads >> > (W.data_device.get(), dZ.data_device.get(), dA.data_device.get(), W.shape.x, W.shape.y, dZ.shape.x, dZ.shape.y); cudaDeviceSynchronize(); dA.copyDeviceToHost(); } else dA = dZ * W.transpose(); updateBias(dZ, learning_rate); updateWeights(dZ, learning_rate); return dA; } void LinearLayer::updateBias(Matrix& dZ, float learning_rate) { if (useGPU) { dim3 num_threads(256); dim3 num_blocks((dZ.shape.x + num_threads.x - 1) / num_threads.x, (dZ.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerUpdateBias << <num_blocks, num_threads >> > (dZ.data_device.get(), b.data_device.get(), dZ.shape.x, dZ.shape.y, b.shape.x, learning_rate); cudaDeviceSynchronize(); b.copyDeviceToHost(); } else { for (int index = 0; index < dZ.shape.x * dZ.shape.y; index++) { int col = index % dZ.shape.y; int row = index / dZ.shape.y; b[col] -= learning_rate * (dZ[row * dZ.shape.y + col] / dZ.shape.x); } } } void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) { assert(W.shape.x == A.shape.y); assert(W.shape.y == dZ.shape.y); if (useGPU) { dim3 num_threads(256, 256); dim3 num_blocks((W.shape.x + num_threads.x - 1) / num_threads.x, (W.shape.y + num_threads.y - 1) / num_threads.y); dZ.copyHostToDevice(); linearLayerUpdateWeights << <num_blocks, num_threads >> > (dZ.data_device.get(), A.data_device.get(), W.data_device.get(), dZ.shape.x, dZ.shape.y, A.shape.x, A.shape.y, learning_rate); cudaDeviceSynchronize(); W.copyDeviceToHost(); } else { Matrix dW = A.transpose() * dZ; for (int row = 0; row < W.shape.x; row++) { for (int col = 0; col < W.shape.y; col++) { W[row * W.shape.y + col] -= (float)(learning_rate * (dW[row * dW.shape.y + col] / A.shape.x)); } } } }
2b00c89571b96892bfcd783a4bf64592efd3bc2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Prints Thread ID of each thread * What to observe/ponder: * - Any trends on how the thread IDs are printed? * - Why are they printed like so? */ #include <stdio.h> void check_cuda_errors() { hipError_t rc; rc = hipGetLastError(); if (rc != hipSuccess) { printf("Last CUDA error %s\n", hipGetErrorString(rc)); } } __global__ void printer() { printf("%d\n", threadIdx.x); } int main(int argc, char **argv) { hipLaunchKernelGGL(( printer), dim3(1), dim3(1024), 0, 0, ); // Waits for all CUDA threads to complete. hipDeviceSynchronize(); check_cuda_errors(); return 0; }
2b00c89571b96892bfcd783a4bf64592efd3bc2a.cu
/** * Prints Thread ID of each thread * What to observe/ponder: * - Any trends on how the thread IDs are printed? * - Why are they printed like so? */ #include <stdio.h> void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void printer() { printf("%d\n", threadIdx.x); } int main(int argc, char **argv) { printer<<<1, 1024>>>(); // Waits for all CUDA threads to complete. cudaDeviceSynchronize(); check_cuda_errors(); return 0; }
3a187bb372b8a61be78815b1cb7823e637d8cd90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions mixed zc -> ds */ #include "common_magma.h" __global__ void clag2z_generic(int M, int N, const magmaFloatComplex *SA, int LDSA, magmaDoubleComplex *A, int LDA ) { int ibx = blockIdx.x * 64; int tx = threadIdx.x; int ty = threadIdx.y; int idt = ty * 16 + tx; if( (ibx+idt) >= M ){ SA += (M-1); A += (M-1); } else{ SA += ibx+idt; A += ibx+idt; } const magmaFloatComplex * SAend = SA+LDSA*N; magmaDoubleComplex Ap[1]={ cuComplexFloatToDouble(SA[0]) }; do { SA += LDSA; A[0] = Ap[0]; Ap[0]= cuComplexFloatToDouble(SA[0]); A += LDA; } while (SA < SAend); A[0] = Ap[0]; } __global__ void clag2z_special(int M, int N, const magmaFloatComplex *SA, int LDSA, magmaDoubleComplex *A, int LDA ) { int ibx = blockIdx.x * 64; int tx = threadIdx.x; int ty = threadIdx.y; int idt = ty * 16 + tx; if( (ibx+idt) >= M ){ SA += (M-1); A += (M-1); } else{ SA += ibx+idt; A += ibx+idt; } magmaDoubleComplex Ap[1] = { cuComplexFloatToDouble(SA[0]) }; A[0] = Ap[0]; } extern "C" void magmablas_clag2z_64_64_16_4_v2( magma_int_t M, magma_int_t N, const magmaFloatComplex *SA, magma_int_t LDSA, magmaDoubleComplex *A, magma_int_t LDA ) { if( M == 0 || N==0 ) { printf("One of the dimension is ZERO\n"); exit(-1); } dim3 threads( 16, 4 ); dim3 grid(M/64+(M%64!=0),1); if( N > 1 ) { hipLaunchKernelGGL(( clag2z_generic), dim3(grid), dim3(threads), 0, magma_stream , M, N, SA, LDSA, A, LDA ) ; } else{ hipLaunchKernelGGL(( clag2z_special), dim3(grid), dim3(threads), 0, magma_stream , M, N, SA, LDSA, A, LDA ) ; } } extern "C" void magmablas_clag2z( magma_int_t m, magma_int_t n, const magmaFloatComplex *SA, magma_int_t ldsa, magmaDoubleComplex *A, magma_int_t lda, magma_int_t *info) { /* Purpose ======= CLAG2Z converts a SINGLE PRECISION matrix, SA, to a DOUBLE PRECISION matrix, A. 
Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments ========= M (input) INTEGER The number of lines of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. SA (input) REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. LDSA (input) INTEGER The leading dimension of the array SA. LDSA >= max(1,M). A (output) DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== */ *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); //return *info; } magmablas_clag2z_64_64_16_4_v2( m, n, SA, ldsa, A, lda ); }
3a187bb372b8a61be78815b1cb7823e637d8cd90.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions mixed zc -> ds */ #include "common_magma.h" __global__ void clag2z_generic(int M, int N, const magmaFloatComplex *SA, int LDSA, magmaDoubleComplex *A, int LDA ) { int ibx = blockIdx.x * 64; int tx = threadIdx.x; int ty = threadIdx.y; int idt = ty * 16 + tx; if( (ibx+idt) >= M ){ SA += (M-1); A += (M-1); } else{ SA += ibx+idt; A += ibx+idt; } const magmaFloatComplex * SAend = SA+LDSA*N; magmaDoubleComplex Ap[1]={ cuComplexFloatToDouble(SA[0]) }; do { SA += LDSA; A[0] = Ap[0]; Ap[0]= cuComplexFloatToDouble(SA[0]); A += LDA; } while (SA < SAend); A[0] = Ap[0]; } __global__ void clag2z_special(int M, int N, const magmaFloatComplex *SA, int LDSA, magmaDoubleComplex *A, int LDA ) { int ibx = blockIdx.x * 64; int tx = threadIdx.x; int ty = threadIdx.y; int idt = ty * 16 + tx; if( (ibx+idt) >= M ){ SA += (M-1); A += (M-1); } else{ SA += ibx+idt; A += ibx+idt; } magmaDoubleComplex Ap[1] = { cuComplexFloatToDouble(SA[0]) }; A[0] = Ap[0]; } extern "C" void magmablas_clag2z_64_64_16_4_v2( magma_int_t M, magma_int_t N, const magmaFloatComplex *SA, magma_int_t LDSA, magmaDoubleComplex *A, magma_int_t LDA ) { if( M == 0 || N==0 ) { printf("One of the dimension is ZERO\n"); exit(-1); } dim3 threads( 16, 4 ); dim3 grid(M/64+(M%64!=0),1); if( N > 1 ) { clag2z_generic<<< grid, threads, 0, magma_stream >>> ( M, N, SA, LDSA, A, LDA ) ; } else{ clag2z_special<<< grid, threads, 0, magma_stream >>> ( M, N, SA, LDSA, A, LDA ) ; } } extern "C" void magmablas_clag2z( magma_int_t m, magma_int_t n, const magmaFloatComplex *SA, magma_int_t ldsa, magmaDoubleComplex *A, magma_int_t lda, magma_int_t *info) { /* Purpose ======= CLAG2Z converts a SINGLE PRECISION matrix, SA, to a DOUBLE PRECISION matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. 
Arguments ========= M (input) INTEGER The number of lines of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. SA (input) REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. LDSA (input) INTEGER The leading dimension of the array SA. LDSA >= max(1,M). A (output) DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== */ *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); //return *info; } magmablas_clag2z_64_64_16_4_v2( m, n, SA, ldsa, A, lda ); }
115e94a9cf2276452120e00f70616b489d4eee7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common.h" #define N (1<<20) struct innerArray { float x[N]; float y[N]; }; void initialInnerArray(innerArray *ip, int size) { for (int i = 0; i < size; i++) { ip->x[i] = (float)(rand()&0xFF) / 100.0f; ip->y[i] = (float)(rand()&0xFF) / 100.0f; } } __global__ void warmup(innerArray *data, innerArray *result, const int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; } } __global__ void testInnerArray(innerArray *data, innerArray *result, const int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; /*if (i < 10) printf("%d: %f, %f\n", i, data->x[i], data->y[i]);*/ } } void testInnerArrayHost(innerArray *data, innerArray *result, const int n) { for (int i=0; i<n; i++) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; } } bool checkInnerArray(innerArray *A, innerArray *B, const int n) { double delta = 1.0E-6; for (int i=0; i<n; i++) { if ((abs(A->x[i] - B->x[i]) > delta) || (abs(A->y[i] - B->y[i]) > delta)) { printf("%d Not Match -> X %f : %f, Y %f : %f\n", i, A->x[i], B->x[i], A->y[i], B->y[i]); return false; } } return true; } int main(int argc, char **argv) { int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("%s test struct of array at device %d: %s\n", argv[0], dev, deviceProp.name); hipSetDevice(dev); int nElem = N; size_t nBytes = sizeof(innerArray); innerArray *h_A = (innerArray*)malloc(nBytes); innerArray *hostRef = (innerArray*)malloc(nBytes); innerArray *gpuRef = (innerArray*)malloc(nBytes); initialInnerArray(h_A, nElem); testInnerArrayHost(h_A, hostRef, nElem); 
innerArray *d_A, *d_C; hipMalloc((innerArray**)&d_A, nBytes); hipMalloc((innerArray**)&d_C, nBytes); hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); int blockSize = 128; if (argc > 1) blockSize = atoi(argv[1]); dim3 block(blockSize, 1); dim3 grid((nElem + block.x -1) / block.x); double iStart = cpuSecond(); hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem); hipDeviceSynchronize(); double iElaps = cpuSecond() - iStart; printf("Warmup <<< %d, %d >>> elapsed %f sec\n", grid.x, block.x, iElaps); hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); checkInnerArray(hostRef, gpuRef, nElem); iStart = cpuSecond(); hipLaunchKernelGGL(( testInnerArray), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem); hipDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("testInnerArray <<< %d, %d >>> elapsed %f sec\n", grid.x, block.x, iElaps); hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); checkInnerArray(hostRef, gpuRef, nElem); hipFree(d_A); hipFree(d_C); free(h_A); free(hostRef); free(gpuRef); hipDeviceReset(); return EXIT_SUCCESS; }
115e94a9cf2276452120e00f70616b489d4eee7e.cu
#include "../common.h" #define N (1<<20) struct innerArray { float x[N]; float y[N]; }; void initialInnerArray(innerArray *ip, int size) { for (int i = 0; i < size; i++) { ip->x[i] = (float)(rand()&0xFF) / 100.0f; ip->y[i] = (float)(rand()&0xFF) / 100.0f; } } __global__ void warmup(innerArray *data, innerArray *result, const int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; } } __global__ void testInnerArray(innerArray *data, innerArray *result, const int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; /*if (i < 10) printf("%d: %f, %f\n", i, data->x[i], data->y[i]);*/ } } void testInnerArrayHost(innerArray *data, innerArray *result, const int n) { for (int i=0; i<n; i++) { float tmpX = data->x[i]; float tmpY = data->y[i]; tmpX += 10.f; tmpY += 20.f; result->x[i] = tmpX; result->y[i] = tmpY; } } bool checkInnerArray(innerArray *A, innerArray *B, const int n) { double delta = 1.0E-6; for (int i=0; i<n; i++) { if ((abs(A->x[i] - B->x[i]) > delta) || (abs(A->y[i] - B->y[i]) > delta)) { printf("%d Not Match -> X %f : %f, Y %f : %f\n", i, A->x[i], B->x[i], A->y[i], B->y[i]); return false; } } return true; } int main(int argc, char **argv) { int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("%s test struct of array at device %d: %s\n", argv[0], dev, deviceProp.name); cudaSetDevice(dev); int nElem = N; size_t nBytes = sizeof(innerArray); innerArray *h_A = (innerArray*)malloc(nBytes); innerArray *hostRef = (innerArray*)malloc(nBytes); innerArray *gpuRef = (innerArray*)malloc(nBytes); initialInnerArray(h_A, nElem); testInnerArrayHost(h_A, hostRef, nElem); innerArray *d_A, *d_C; cudaMalloc((innerArray**)&d_A, nBytes); cudaMalloc((innerArray**)&d_C, 
nBytes); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); int blockSize = 128; if (argc > 1) blockSize = atoi(argv[1]); dim3 block(blockSize, 1); dim3 grid((nElem + block.x -1) / block.x); double iStart = cpuSecond(); warmup<<<grid, block>>>(d_A, d_C, nElem); cudaDeviceSynchronize(); double iElaps = cpuSecond() - iStart; printf("Warmup <<< %d, %d >>> elapsed %f sec\n", grid.x, block.x, iElaps); cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); checkInnerArray(hostRef, gpuRef, nElem); iStart = cpuSecond(); testInnerArray<<<grid, block>>>(d_A, d_C, nElem); cudaDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("testInnerArray <<< %d, %d >>> elapsed %f sec\n", grid.x, block.x, iElaps); cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); checkInnerArray(hostRef, gpuRef, nElem); cudaFree(d_A); cudaFree(d_C); free(h_A); free(hostRef); free(gpuRef); cudaDeviceReset(); return EXIT_SUCCESS; }
5fa970a4eb47526ae91b93c71e435c85e930bbab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define DEBUG __global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols) { //Calculating indices along x and y directions int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; //Calculating Grid width in terms of number of threads int grid_width = gridDim.x * blockDim.x; int sum = 0; for(int i = 0; i<h_rows; i++) { for(int j = 0; j<h_cols;j++) { if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1)) sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]); } } c[index_y*grid_width + index_x] = sum; } int main (int argc, char *argv[]) { if(argc != 2) { fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"); exit(1); } FILE *f; int a_cols = 1; int h_cols = 1; int a_rows = 0; int h_rows = 0; int c_cols = 0; int c_rows = 0; float *a_h = 0; float *a_d = 0; float *hinv_h = 0; float *hinv_d = 0; float *c_h = 0; float *c_d = 0; size_t a_size = 0; size_t h_size = 0; size_t c_size = 0; dim3 block_size; dim3 grid_size; int i=0,j=0; char junk,junk_old; //Opening File f = fopen(argv[1],"r"); //First pass to find out size of the matrices junk = fgetc(f); while (junk != EOF) { if(junk == '\n') { a_rows++; } else if(junk == 0x20 & a_rows == 0) { a_cols++; } junk_old = junk; junk = fgetc(f); if(junk == '\n' & junk == junk_old) { break; } } junk = fgetc(f); while (junk != EOF) { if(junk == '\n') { h_rows++; } else if(junk == 0x20 & h_rows == 0) { h_cols++; } junk = fgetc(f); } //Calculating op dimensions c_rows = a_rows + h_rows - 1; block_size.y = c_rows > 32 ? 32 : c_rows; c_cols = a_cols + h_cols - 1; block_size.x = c_cols > 16 ? 
16 : c_cols; grid_size.y = (c_rows/32)+1; grid_size.x = (c_cols/16)+1; #ifdef DEBUG printf("Size of A: %dx%d\n",a_rows,a_cols); printf("Size of H: %dx%d\n",h_rows,h_cols); printf("Size of C: %dx%d\n",c_rows,c_cols); printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x); printf("Size of block: %dx%d\n",block_size.y,block_size.x); #endif //Calculating the sizes of all the involved matrices a_size = a_rows * a_cols *sizeof(float); h_size = h_rows * h_cols *sizeof(float); c_size = c_rows * c_cols *sizeof(float); //Allocating memory on host a_h = (float *) malloc(a_size); hinv_h = (float *) malloc(h_size); c_h = (float *) malloc(c_size); //Rewinding file to read the actual data rewind(f); //Reading all the data matrices for(i = 0;i<a_rows;i++) { for (j = 0; j<a_cols;j++) fscanf(f,"%f",&a_h[i*a_cols + j]); } for(i = 0 ; i<h_rows;i++) { for (j = 0; j<h_cols ;j++) { fscanf(f,"%f",&hinv_h[i*h_cols + j]); } } #ifdef DEBUG for(i = 0;i<a_rows;i++) { for (j = 0; j<a_cols;j++) printf("%f ",a_h[i*a_cols + j]); printf("\n"); } for(i = 0;i<h_rows;i++) { for (j = 0; j<h_cols;j++) { printf("%f ",hinv_h[i*h_cols + j]); } printf("\n"); } printf("Completed Loading Matrices...\n"); #endif //hipMalloc to allocate required matrices on the device hipMalloc((void **)&a_d,a_size); hipMalloc((void **)&hinv_d,h_size); hipMalloc((void **)&c_d,c_size); //Copying input data from the Host to Device hipMemcpy(a_d,a_h,a_size,hipMemcpyHostToDevice); hipMemcpy(hinv_d,hinv_h,h_size,hipMemcpyHostToDevice); //Setting Op matrix to all zeros hipMemset(c_d,0,c_size); //Convolution function hipLaunchKernelGGL(( convol2D), dim3(grid_size),dim3(block_size), 0, 0, a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols); //Synchronize to wait for the kernel to complete exectution hipDeviceSynchronize(); //Copy the output matrix from the Device to host hipMemcpy(c_h,c_d,c_size,hipMemcpyDeviceToHost); //Print Output for(i=0;i<c_rows;i++) { for(j=0;j<c_cols;j++) { printf("%f ",c_h[i*c_cols + j]); } printf("\n"); } //Freeing 
all the allocated memory from the device hipFree(a_d); hipFree(hinv_d); hipFree(c_d); //Freeing all the allocated memory from the host free(a_h); free(hinv_h); free(c_h); fclose(f); return 0; }
5fa970a4eb47526ae91b93c71e435c85e930bbab.cu
#include <stdio.h> #include <stdlib.h> #define DEBUG __global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols) { //Calculating indices along x and y directions int index_x = blockIdx.x * blockDim.x + threadIdx.x; int index_y = blockIdx.y * blockDim.y + threadIdx.y; //Calculating Grid width in terms of number of threads int grid_width = gridDim.x * blockDim.x; int sum = 0; for(int i = 0; i<h_rows; i++) { for(int j = 0; j<h_cols;j++) { if((index_y-i)>=0 & (index_y-i)<=(a_rows-1) & (index_x-j)>=0 & (index_x-j)<=(a_cols-1)) sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]); } } c[index_y*grid_width + index_x] = sum; } int main (int argc, char *argv[]) { if(argc != 2) { fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n"); exit(1); } FILE *f; int a_cols = 1; int h_cols = 1; int a_rows = 0; int h_rows = 0; int c_cols = 0; int c_rows = 0; float *a_h = 0; float *a_d = 0; float *hinv_h = 0; float *hinv_d = 0; float *c_h = 0; float *c_d = 0; size_t a_size = 0; size_t h_size = 0; size_t c_size = 0; dim3 block_size; dim3 grid_size; int i=0,j=0; char junk,junk_old; //Opening File f = fopen(argv[1],"r"); //First pass to find out size of the matrices junk = fgetc(f); while (junk != EOF) { if(junk == '\n') { a_rows++; } else if(junk == 0x20 & a_rows == 0) { a_cols++; } junk_old = junk; junk = fgetc(f); if(junk == '\n' & junk == junk_old) { break; } } junk = fgetc(f); while (junk != EOF) { if(junk == '\n') { h_rows++; } else if(junk == 0x20 & h_rows == 0) { h_cols++; } junk = fgetc(f); } //Calculating op dimensions c_rows = a_rows + h_rows - 1; block_size.y = c_rows > 32 ? 32 : c_rows; c_cols = a_cols + h_cols - 1; block_size.x = c_cols > 16 ? 
16 : c_cols; grid_size.y = (c_rows/32)+1; grid_size.x = (c_cols/16)+1; #ifdef DEBUG printf("Size of A: %dx%d\n",a_rows,a_cols); printf("Size of H: %dx%d\n",h_rows,h_cols); printf("Size of C: %dx%d\n",c_rows,c_cols); printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x); printf("Size of block: %dx%d\n",block_size.y,block_size.x); #endif //Calculating the sizes of all the involved matrices a_size = a_rows * a_cols *sizeof(float); h_size = h_rows * h_cols *sizeof(float); c_size = c_rows * c_cols *sizeof(float); //Allocating memory on host a_h = (float *) malloc(a_size); hinv_h = (float *) malloc(h_size); c_h = (float *) malloc(c_size); //Rewinding file to read the actual data rewind(f); //Reading all the data matrices for(i = 0;i<a_rows;i++) { for (j = 0; j<a_cols;j++) fscanf(f,"%f",&a_h[i*a_cols + j]); } for(i = 0 ; i<h_rows;i++) { for (j = 0; j<h_cols ;j++) { fscanf(f,"%f",&hinv_h[i*h_cols + j]); } } #ifdef DEBUG for(i = 0;i<a_rows;i++) { for (j = 0; j<a_cols;j++) printf("%f ",a_h[i*a_cols + j]); printf("\n"); } for(i = 0;i<h_rows;i++) { for (j = 0; j<h_cols;j++) { printf("%f ",hinv_h[i*h_cols + j]); } printf("\n"); } printf("Completed Loading Matrices...\n"); #endif //cudaMalloc to allocate required matrices on the device cudaMalloc((void **)&a_d,a_size); cudaMalloc((void **)&hinv_d,h_size); cudaMalloc((void **)&c_d,c_size); //Copying input data from the Host to Device cudaMemcpy(a_d,a_h,a_size,cudaMemcpyHostToDevice); cudaMemcpy(hinv_d,hinv_h,h_size,cudaMemcpyHostToDevice); //Setting Op matrix to all zeros cudaMemset(c_d,0,c_size); //Convolution function convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols); //Synchronize to wait for the kernel to complete exectution cudaThreadSynchronize(); //Copy the output matrix from the Device to host cudaMemcpy(c_h,c_d,c_size,cudaMemcpyDeviceToHost); //Print Output for(i=0;i<c_rows;i++) { for(j=0;j<c_cols;j++) { printf("%f ",c_h[i*c_cols + j]); } printf("\n"); } //Freeing all the allocated memory 
from the device cudaFree(a_d); cudaFree(hinv_d); cudaFree(c_d); //Freeing all the allocated memory from the host free(a_h); free(hinv_h); free(c_h); fclose(f); return 0; }
572f6eca31219852d312e1b798c2f4463d1082f0.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstring> #include <cmath> #include <hip/hip_runtime.h> #include <fstream> #include <iostream> #include "jpeg/cuda/ohmura/gpu_jpeg.cuh" #include "utils/util_cv.h" #include "utils/timer.h" #include "utils/cuda/cuda_timer.h" #include "utils/cuda/cuda_memory.hpp" void gpu_main(const std::string &file_name, const std::string &out_file_name) { using namespace std; using namespace util; using namespace util::cuda; using namespace jpeg; using namespace jpeg::ohmura; CudaStopWatch watch; //---------------------------------------------------------------------------- // //============================================================================ BitmapCVUtil source(file_name, BitmapCVUtil::RGB_COLOR); const int width = source.getWidth(); const int height = source.getHeight(); std::cout << "Encode" << std::endl; int result_size; watch.start(); cuda_memory<byte> encode_result(sizeof(byte) * (width * height * 3)); encode_result.fill_zero(); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { watch.start(); jpeg::ohmura::JpegEncoder encoder(width, height); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { result_size = encoder.encode((byte*) source.getRawData(), encode_result); watch.start(); encode_result.sync_to_host(); watch.stop(); cout << "Memory Transfer, " << watch.getLastElapsedTime() << endl; } } watch.clear(); std::cout << "\nDecode" << std::endl; watch.start(); BitmapCVUtil result(width, height, 8, source.getBytePerPixel()); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { watch.start(); device_memory<byte> decode_result(width * height * 3); jpeg::ohmura::JpegDecoder decoder(width, height); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { decoder.decode(encode_result.host_data(), result_size, decode_result); watch.start(); decode_result.copy_to_host((byte*) 
result.getRawData(), decode_result.size()); watch.stop(); cout << "Memory Transfer, " << watch.getLastElapsedTime() << "\n" << endl; } } result.saveToFile("gpu_" + out_file_name); }
572f6eca31219852d312e1b798c2f4463d1082f0.cu
#include <cstdlib> #include <cstring> #include <cmath> #include <cuda_runtime.h> #include <fstream> #include <iostream> #include "jpeg/cuda/ohmura/gpu_jpeg.cuh" #include "utils/util_cv.h" #include "utils/timer.h" #include "utils/cuda/cuda_timer.h" #include "utils/cuda/cuda_memory.hpp" void gpu_main(const std::string &file_name, const std::string &out_file_name) { using namespace std; using namespace util; using namespace util::cuda; using namespace jpeg; using namespace jpeg::ohmura; CudaStopWatch watch; //---------------------------------------------------------------------------- // 画像読み込み //============================================================================ BitmapCVUtil source(file_name, BitmapCVUtil::RGB_COLOR); const int width = source.getWidth(); const int height = source.getHeight(); std::cout << "Encode" << std::endl; int result_size; watch.start(); cuda_memory<byte> encode_result(sizeof(byte) * (width * height * 3)); encode_result.fill_zero(); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { watch.start(); jpeg::ohmura::JpegEncoder encoder(width, height); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { result_size = encoder.encode((byte*) source.getRawData(), encode_result); watch.start(); encode_result.sync_to_host(); watch.stop(); cout << "Memory Transfer, " << watch.getLastElapsedTime() << endl; } } watch.clear(); std::cout << "\nDecode" << std::endl; watch.start(); BitmapCVUtil result(width, height, 8, source.getBytePerPixel()); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { watch.start(); device_memory<byte> decode_result(width * height * 3); jpeg::ohmura::JpegDecoder decoder(width, height); watch.stop(); cout << "Preprocess, " << watch.getLastElapsedTime() << endl; { decoder.decode(encode_result.host_data(), result_size, decode_result); watch.start(); decode_result.copy_to_host((byte*) result.getRawData(), decode_result.size()); watch.stop(); cout << 
"Memory Transfer, " << watch.getLastElapsedTime() << "\n" << endl; } } result.saveToFile("gpu_" + out_file_name); }
2b3608efd3af8b03d5848d39491e6d7d7753c417.hip
// !!! This is a file automatically generated by hipify!!! /****************************/ /* THIS IS OPEN SOURCE CODE */ /****************************/ /** * @file HelloWorld.c * CVS: $Id$ * @author Heike Jagode * jagode@eecs.utk.edu * Mods: <your name here> * <your email address> * test case for Example component * * * @brief * This file is a very simple HelloWorld C example which serves (together * with its Makefile) as a guideline on how to add tests to components. * The papi configure and papi Makefile will take care of the compilation * of the component tests (if all tests are added to a directory named * 'tests' in the specific component dir). * See components/README for more details. * * The string "Hello World!" is mangled and then restored. */ #include <hip/hip_runtime.h> #include <stdio.h> #include "papi_test.h" #define NUM_EVENTS 1 #define PAPI // Prototypes __global__ void helloWorld(char*); // Host function int main(int argc, char** argv) { #ifdef PAPI int retval, i; int EventSet = PAPI_NULL; long long values[NUM_EVENTS]; /* REPLACE THE EVENT NAME 'PAPI_FP_OPS' WITH A CUDA EVENT FOR THE CUDA DEVICE YOU ARE RUNNING ON. 
RUN papi_native_avail to get a list of CUDA events that are supported on your machine */ char *EventName[] = { "PAPI_FP_OPS" }; int events[NUM_EVENTS]; int eventCount = 0; /* PAPI Initialization */ retval = PAPI_library_init( PAPI_VER_CURRENT ); if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" ); printf( "PAPI_VERSION : %4d %6d %7d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) ); /* convert PAPI native events to PAPI code */ for( i = 0; i < NUM_EVENTS; i++ ){ retval = PAPI_event_name_to_code( EventName[i], &events[i] ); if( retval != PAPI_OK ) { fprintf( stderr, "PAPI_event_name_to_code failed\n" ); continue; } eventCount++; printf( "Name %s --- Code: %#x\n", EventName[i], events[i] ); } /* if we did not find any valid events, just report test failed. */ if (eventCount == 0) { printf( "Test FAILED: no valid events found.\n"); return 1; } retval = PAPI_create_eventset( &EventSet ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" ); retval = PAPI_add_events( EventSet, events, eventCount ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_add_events failed\n" ); retval = PAPI_start( EventSet ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" ); #endif int j; // desired output char str[] = "Hello World!"; // mangle contents of output // the null character is left intact for simplicity for(j = 0; j < 12; j++) { str[j] -= j; //printf("str=%s\n", str); } // allocate memory on the device char *d_str; size_t size = sizeof(str); hipMalloc((void**)&d_str, size); // copy the string to the device hipMemcpy(d_str, str, size, hipMemcpyHostToDevice); // set the grid and block sizes dim3 dimGrid(2); // one block per word dim3 dimBlock(6); // one thread per character // invoke the kernel hipLaunchKernelGGL(( helloWorld), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_str); // retrieve the results from the device hipMemcpy(str, d_str, size, 
hipMemcpyDeviceToHost); // free up the allocated memory on the device hipFree(d_str); printf("END: %s\n", str); #ifdef PAPI retval = PAPI_stop( EventSet, values ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" ); retval = PAPI_cleanup_eventset(EventSet); if( retval != PAPI_OK ) fprintf(stderr, "PAPI_cleanup_eventset failed\n"); retval = PAPI_destroy_eventset(&EventSet); if (retval != PAPI_OK) fprintf(stderr, "PAPI_destroy_eventset failed\n"); PAPI_shutdown(); for( i = 0; i < eventCount; i++ ) printf( "%12lld \t\t --> %s \n", values[i], EventName[i] ); #endif return 0; } // Device kernel __global__ void helloWorld(char* str) { // determine where in the thread grid we are int idx = blockIdx.x * blockDim.x + threadIdx.x; // unmangle output str[idx] += idx; }
2b3608efd3af8b03d5848d39491e6d7d7753c417.cu
/****************************/ /* THIS IS OPEN SOURCE CODE */ /****************************/ /** * @file HelloWorld.c * CVS: $Id$ * @author Heike Jagode * jagode@eecs.utk.edu * Mods: <your name here> * <your email address> * test case for Example component * * * @brief * This file is a very simple HelloWorld C example which serves (together * with its Makefile) as a guideline on how to add tests to components. * The papi configure and papi Makefile will take care of the compilation * of the component tests (if all tests are added to a directory named * 'tests' in the specific component dir). * See components/README for more details. * * The string "Hello World!" is mangled and then restored. */ #include <cuda.h> #include <stdio.h> #include "papi_test.h" #define NUM_EVENTS 1 #define PAPI // Prototypes __global__ void helloWorld(char*); // Host function int main(int argc, char** argv) { #ifdef PAPI int retval, i; int EventSet = PAPI_NULL; long long values[NUM_EVENTS]; /* REPLACE THE EVENT NAME 'PAPI_FP_OPS' WITH A CUDA EVENT FOR THE CUDA DEVICE YOU ARE RUNNING ON. RUN papi_native_avail to get a list of CUDA events that are supported on your machine */ char *EventName[] = { "PAPI_FP_OPS" }; int events[NUM_EVENTS]; int eventCount = 0; /* PAPI Initialization */ retval = PAPI_library_init( PAPI_VER_CURRENT ); if( retval != PAPI_VER_CURRENT ) fprintf( stderr, "PAPI_library_init failed\n" ); printf( "PAPI_VERSION : %4d %6d %7d\n", PAPI_VERSION_MAJOR( PAPI_VERSION ), PAPI_VERSION_MINOR( PAPI_VERSION ), PAPI_VERSION_REVISION( PAPI_VERSION ) ); /* convert PAPI native events to PAPI code */ for( i = 0; i < NUM_EVENTS; i++ ){ retval = PAPI_event_name_to_code( EventName[i], &events[i] ); if( retval != PAPI_OK ) { fprintf( stderr, "PAPI_event_name_to_code failed\n" ); continue; } eventCount++; printf( "Name %s --- Code: %#x\n", EventName[i], events[i] ); } /* if we did not find any valid events, just report test failed. 
*/ if (eventCount == 0) { printf( "Test FAILED: no valid events found.\n"); return 1; } retval = PAPI_create_eventset( &EventSet ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_create_eventset failed\n" ); retval = PAPI_add_events( EventSet, events, eventCount ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_add_events failed\n" ); retval = PAPI_start( EventSet ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_start failed\n" ); #endif int j; // desired output char str[] = "Hello World!"; // mangle contents of output // the null character is left intact for simplicity for(j = 0; j < 12; j++) { str[j] -= j; //printf("str=%s\n", str); } // allocate memory on the device char *d_str; size_t size = sizeof(str); cudaMalloc((void**)&d_str, size); // copy the string to the device cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice); // set the grid and block sizes dim3 dimGrid(2); // one block per word dim3 dimBlock(6); // one thread per character // invoke the kernel helloWorld<<< dimGrid, dimBlock >>>(d_str); // retrieve the results from the device cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost); // free up the allocated memory on the device cudaFree(d_str); printf("END: %s\n", str); #ifdef PAPI retval = PAPI_stop( EventSet, values ); if( retval != PAPI_OK ) fprintf( stderr, "PAPI_stop failed\n" ); retval = PAPI_cleanup_eventset(EventSet); if( retval != PAPI_OK ) fprintf(stderr, "PAPI_cleanup_eventset failed\n"); retval = PAPI_destroy_eventset(&EventSet); if (retval != PAPI_OK) fprintf(stderr, "PAPI_destroy_eventset failed\n"); PAPI_shutdown(); for( i = 0; i < eventCount; i++ ) printf( "%12lld \t\t --> %s \n", values[i], EventName[i] ); #endif return 0; } // Device kernel __global__ void helloWorld(char* str) { // determine where in the thread grid we are int idx = blockIdx.x * blockDim.x + threadIdx.x; // unmangle output str[idx] += idx; }
bd92a023231df88ca835734c07fb44355af9f3b0.hip
// !!! This is a file automatically generated by hipify!!! // example1.cpp : Defines the entry point for the console application. // compile with: nvcc -o helloworld helloworld.cu -lcudart // #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __device__ int get_global_id() { int index; index = blockIdx.x * blockDim.x + threadIdx.x; return index; } // Kernel that executes on the CUDA device __global__ void __global__ void square_array_gpu(float *a, int N) { int idx = get_global_id(); int i; int count = 0; int factors[5]; int remain; for(i = 0; i*i <= a[idx]; i += 2){ if(remain = fmod(a[idx], i+1) == 0 ){ //count++; factors[i] = i; } } for(i = 0; i < sizeof(factors); i++){ if(factors[i] > 5){ count++; } } a[idx] = count; //a[idx] = a[idx] * a[idx]; } // main routine that executes on the host int main(void) { int main(int argc, char **argv) { float *a_h, *a_d; // Pointer to host & device arrays const int N = atoi(argv[1]); // Number of elements in arrays size_t size = N * sizeof(float); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); a_h = (float *)malloc(size); // Allocate array on host hipMalloc((void **) &a_d, size); // Allocate array on device // Initialize host array and copy it to CUDA device for (int i=0; i < N; i++) { a_h[i] = (float)i; } hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // Do calculation on device: //int block_size = 4; //int n_blocks = N/block_size + (N % block_size == 0 ? 
0:1); hipLaunchKernelGGL(( square_array_gpu) , dim3(1), dim3(1000), 0, 0, a_d, sizeof(a_d)); // Retrieve result from device and store it in host array hipMemcpy(a_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.3f ms\n", elapsedTime); // Print results int count = 0; for (int i=0; i < N; i++) { if(a_h[i] == 0){ count++; } printf("%d %.0f\n", i, a_h[i]); } printf("count = %d\n", count); // Cleanup free(a_h); hipFree(a_d); return 0; }
bd92a023231df88ca835734c07fb44355af9f3b0.cu
// example1.cpp : Defines the entry point for the console application. // compile with: nvcc -o helloworld helloworld.cu -lcudart // #include <stdio.h> #include <stdlib.h> #include <cuda.h> __device__ int get_global_id() { int index; index = blockIdx.x * blockDim.x + threadIdx.x; return index; } // Kernel that executes on the CUDA device __global__ void __global__ void square_array_gpu(float *a, int N) { int idx = get_global_id(); int i; int count = 0; int factors[5]; int remain; for(i = 0; i*i <= a[idx]; i += 2){ if(remain = fmod(a[idx], i+1) == 0 ){ //count++; factors[i] = i; } } for(i = 0; i < sizeof(factors); i++){ if(factors[i] > 5){ count++; } } a[idx] = count; //a[idx] = a[idx] * a[idx]; } // main routine that executes on the host int main(void) { int main(int argc, char **argv) { float *a_h, *a_d; // Pointer to host & device arrays const int N = atoi(argv[1]); // Number of elements in arrays size_t size = N * sizeof(float); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); a_h = (float *)malloc(size); // Allocate array on host cudaMalloc((void **) &a_d, size); // Allocate array on device // Initialize host array and copy it to CUDA device for (int i=0; i < N; i++) { a_h[i] = (float)i; } cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); // Do calculation on device: //int block_size = 4; //int n_blocks = N/block_size + (N % block_size == 0 ? 0:1); square_array_gpu <<< 1, 1000>>> (a_d, sizeof(a_d)); // Retrieve result from device and store it in host array cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.3f ms\n", elapsedTime); // Print results int count = 0; for (int i=0; i < N; i++) { if(a_h[i] == 0){ count++; } printf("%d %.0f\n", i, a_h[i]); } printf("count = %d\n", count); // Cleanup free(a_h); cudaFree(a_d); return 0; }
2f3ebd9dc60a3fb2d2c373173debf39f3730e8f3.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> #include <hip/hip_runtime.h> #include <hip/hip_cooperative_groups.h> #include <cooperative_groups/reduce.h> namespace cg = cooperative_groups; #define NUM_ELEMS 10000000 #define NUM_THREADS_PER_BLOCK 512 // warp-aggregated atomic increment __device__ int atomicAggInc(int *counter) { cg::coalesced_group active = cg::coalesced_threads(); // leader does the update int res = 0; if (active.thread_rank() == 0) { res = atomicAdd(counter, active.size()); } // broadcast result res = active.shfl(res, 0); // each thread computes its own value return res + active.thread_rank(); } __global__ void filter_arr(int *dst, int *nres, const int *src, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; for (int i = id; i < n; i += gridDim.x * blockDim.x) { if (src[i] > 0) dst[atomicAggInc(nres)] = src[i]; } } // warp-aggregated atomic multi bucket increment #if __CUDA_ARCH__ >= 700 __device__ int atomicAggIncMulti(const int bucket, int *counter) { cg::coalesced_group active = cg::coalesced_threads(); // group all threads with same bucket value. auto labeledGroup = cg::labeled_partition(active, bucket); int res = 0; if (labeledGroup.thread_rank() == 0) { res = atomicAdd(&counter[bucket], labeledGroup.size()); } // broadcast result res = labeledGroup.shfl(res, 0); // each thread computes its own value return res + labeledGroup.thread_rank(); } #endif // Places individual value indices into its corresponding buckets. 
__global__ void mapToBuckets(const int *srcArr, int *indicesBuckets, int *bucketCounters, const int srcSize, const int numOfBuckets) { #if __CUDA_ARCH__ >= 700 cg::grid_group grid = cg::this_grid(); for (int i=grid.thread_rank(); i < srcSize; i += grid.size()) { const int bucket = srcArr[i]; if (bucket < numOfBuckets) { indicesBuckets[atomicAggIncMulti(bucket, bucketCounters)] = i; } } #endif } int mapIndicesToBuckets(int *h_srcArr, int *d_srcArr, int numOfBuckets) { int *d_indicesBuckets, *d_bucketCounters; int *cpuBucketCounters = new int[numOfBuckets]; int *h_bucketCounters = new int[numOfBuckets]; memset(cpuBucketCounters, 0, sizeof(int)*numOfBuckets); // Initialize each bucket counters. for (int i = 0; i < numOfBuckets; i++) { h_bucketCounters[i] = i*NUM_ELEMS; } checkCudaErrors(hipMalloc(&d_indicesBuckets, sizeof(int) * NUM_ELEMS * numOfBuckets)); checkCudaErrors(hipMalloc(&d_bucketCounters, sizeof(int) * numOfBuckets)); checkCudaErrors(hipMemcpy(d_bucketCounters, h_bucketCounters, sizeof(int)*numOfBuckets, hipMemcpyHostToDevice)); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK), 1, 1); hipLaunchKernelGGL(( mapToBuckets), dim3(dimGrid), dim3(dimBlock), 0, 0, d_srcArr, d_indicesBuckets, d_bucketCounters, NUM_ELEMS, numOfBuckets); checkCudaErrors(hipMemcpy(h_bucketCounters, d_bucketCounters, sizeof(int)*numOfBuckets, hipMemcpyDeviceToHost)); for (int i=0; i < NUM_ELEMS; i++) { cpuBucketCounters[h_srcArr[i]]++; } bool allMatch = true; int finalElems = 0; for (int i=0; i < numOfBuckets; i++) { finalElems += (h_bucketCounters[i] - i*NUM_ELEMS); if (cpuBucketCounters[i] != (h_bucketCounters[i] - i*NUM_ELEMS)) { allMatch = false; break; } } if (!allMatch && finalElems != NUM_ELEMS) { return EXIT_FAILURE; } return EXIT_SUCCESS; } // Warp-aggregated atomic Max in multi bucket #if __CUDA_ARCH__ >= 700 __device__ void atomicAggMaxMulti(const int bucket, int *counter, const int valueForMax) { cg::coalesced_group active = 
cg::coalesced_threads(); // group all threads with same bucket value. auto labeledGroup = cg::labeled_partition(active, bucket); const int maxValueInGroup = cg::reduce(labeledGroup, valueForMax, cg::greater<int>()); if (labeledGroup.thread_rank() == 0) { atomicMax(&counter[bucket], maxValueInGroup); } } #endif // Performs max calculation in each buckets. __global__ void calculateMaxInEachBuckets(const int *srcArr, const int *valueInBuckets, int *bucketsMax, const int srcSize, const int numOfBuckets) { #if __CUDA_ARCH__ >= 700 cg::grid_group grid = cg::this_grid(); for (int i=grid.thread_rank(); i < srcSize; i += grid.size()) { const int bucket = srcArr[i]; if (bucket < numOfBuckets) { atomicAggMaxMulti(bucket, bucketsMax, valueInBuckets[i]); } } #endif } int calculateMaxInBuckets(int *h_srcArr, int *d_srcArr, int numOfBuckets) { int *d_valueInBuckets, *d_bucketsMax; int *h_valueInBuckets = new int[NUM_ELEMS]; int *cpuBucketsMax = new int[numOfBuckets]; int *h_bucketsMax = new int[numOfBuckets]; memset(cpuBucketsMax, 0, sizeof(int) * numOfBuckets); // Here we create values which is assumed to correspond to each // buckets of srcArr at same array index. 
for (int i=0; i < NUM_ELEMS; i++) { h_valueInBuckets[i] = rand(); } checkCudaErrors(hipMalloc(&d_valueInBuckets, sizeof(int) * NUM_ELEMS)); checkCudaErrors(hipMalloc(&d_bucketsMax, sizeof(int) * numOfBuckets)); checkCudaErrors(hipMemset(d_bucketsMax, 0, sizeof(int) * numOfBuckets)); checkCudaErrors(hipMemcpy(d_valueInBuckets, h_valueInBuckets, sizeof(int) * NUM_ELEMS, hipMemcpyHostToDevice)); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK), 1, 1); hipLaunchKernelGGL(( calculateMaxInEachBuckets), dim3(dimGrid), dim3(dimBlock), 0, 0, d_srcArr, d_valueInBuckets, d_bucketsMax, NUM_ELEMS, numOfBuckets); checkCudaErrors(hipMemcpy(h_bucketsMax, d_bucketsMax, sizeof(int) * numOfBuckets, hipMemcpyDeviceToHost)); for (int i = 0; i < NUM_ELEMS; i++) { if (cpuBucketsMax[h_srcArr[i]] < h_valueInBuckets[i]) { cpuBucketsMax[h_srcArr[i]] = h_valueInBuckets[i]; } } bool allMatch = true; int finalElems = 0; for (int i=0; i < numOfBuckets; i++) { if (cpuBucketsMax[i] != h_bucketsMax[i]) { allMatch = false; printf("CPU i=%d max = %d mismatches GPU max = %d\n", i, cpuBucketsMax[i], h_bucketsMax[i]); break; } } if (allMatch) { printf("CPU max matches GPU max\n"); } delete[] h_valueInBuckets; delete[] cpuBucketsMax; delete[] h_bucketsMax; checkCudaErrors(hipFree(d_valueInBuckets)); checkCudaErrors(hipFree(d_bucketsMax)); if (!allMatch && finalElems != NUM_ELEMS) { return EXIT_FAILURE; } return EXIT_SUCCESS; } int main(int argc, char **argv) { int *data_to_filter, *filtered_data, nres = 0; int *d_data_to_filter, *d_filtered_data, *d_nres; int numOfBuckets = 5; data_to_filter = reinterpret_cast<int *>(malloc(sizeof(int) * NUM_ELEMS)); // Generate input data. 
for (int i = 0; i < NUM_ELEMS; i++) { data_to_filter[i] = rand() % numOfBuckets; } int devId = findCudaDevice(argc, (const char **)argv); checkCudaErrors(hipMalloc(&d_data_to_filter, sizeof(int) * NUM_ELEMS)); checkCudaErrors(hipMalloc(&d_filtered_data, sizeof(int) * NUM_ELEMS)); checkCudaErrors(hipMalloc(&d_nres, sizeof(int))); checkCudaErrors(hipMemcpy(d_data_to_filter, data_to_filter, sizeof(int) * NUM_ELEMS, hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_nres, 0, sizeof(int))); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK) + 1, 1, 1); hipLaunchKernelGGL(( filter_arr), dim3(dimGrid), dim3(dimBlock), 0, 0, d_filtered_data, d_nres, d_data_to_filter, NUM_ELEMS); checkCudaErrors( hipMemcpy(&nres, d_nres, sizeof(int), hipMemcpyDeviceToHost)); filtered_data = reinterpret_cast<int *>(malloc(sizeof(int) * nres)); checkCudaErrors(hipMemcpy(filtered_data, d_filtered_data, sizeof(int) * nres, hipMemcpyDeviceToHost)); int *host_filtered_data = reinterpret_cast<int *>(malloc(sizeof(int) * NUM_ELEMS)); // Generate host output with host filtering code. 
int host_flt_count = 0; for (int i = 0; i < NUM_ELEMS; i++) { if (data_to_filter[i] > 0) { host_filtered_data[host_flt_count++] = data_to_filter[i]; } } int major = 0; checkCudaErrors(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, devId)); int mapIndicesToBucketsStatus = EXIT_SUCCESS; int calculateMaxInBucketsStatus = EXIT_SUCCESS; // atomicAggIncMulti & atomicAggMaxMulti require a GPU of Volta (SM7X) architecture or higher, // so that it can take advantage of the new MATCH capability of Volta hardware if (major >= 7) { mapIndicesToBucketsStatus = mapIndicesToBuckets(data_to_filter, d_data_to_filter, numOfBuckets); calculateMaxInBucketsStatus = calculateMaxInBuckets(data_to_filter, d_data_to_filter, numOfBuckets); } printf("\nWarp Aggregated Atomics %s \n", (host_flt_count == nres) && (mapIndicesToBucketsStatus == EXIT_SUCCESS) && (calculateMaxInBucketsStatus == EXIT_SUCCESS) ? "PASSED" : "FAILED"); checkCudaErrors(hipFree(d_data_to_filter)); checkCudaErrors(hipFree(d_filtered_data)); checkCudaErrors(hipFree(d_nres)); free(data_to_filter); free(filtered_data); free(host_filtered_data); }
2f3ebd9dc60a3fb2d2c373173debf39f3730e8f3.cu
/** * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> #include <cuda_runtime.h> #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> namespace cg = cooperative_groups; #define NUM_ELEMS 10000000 #define NUM_THREADS_PER_BLOCK 512 // warp-aggregated atomic increment __device__ int atomicAggInc(int *counter) { cg::coalesced_group active = cg::coalesced_threads(); // leader does the update int res = 0; if (active.thread_rank() == 0) { res = atomicAdd(counter, active.size()); } // broadcast result res = active.shfl(res, 0); // each thread computes its own value return res + active.thread_rank(); } __global__ void filter_arr(int *dst, int *nres, const int *src, int n) { int id = threadIdx.x + blockIdx.x * blockDim.x; for (int i = id; i < n; i += gridDim.x * blockDim.x) { if (src[i] > 0) dst[atomicAggInc(nres)] = src[i]; } } // warp-aggregated atomic multi bucket increment #if __CUDA_ARCH__ >= 700 __device__ int atomicAggIncMulti(const int bucket, int *counter) { cg::coalesced_group active = cg::coalesced_threads(); // group all threads with same bucket value. auto labeledGroup = cg::labeled_partition(active, bucket); int res = 0; if (labeledGroup.thread_rank() == 0) { res = atomicAdd(&counter[bucket], labeledGroup.size()); } // broadcast result res = labeledGroup.shfl(res, 0); // each thread computes its own value return res + labeledGroup.thread_rank(); } #endif // Places individual value indices into its corresponding buckets. 
__global__ void mapToBuckets(const int *srcArr, int *indicesBuckets, int *bucketCounters, const int srcSize, const int numOfBuckets) { #if __CUDA_ARCH__ >= 700 cg::grid_group grid = cg::this_grid(); for (int i=grid.thread_rank(); i < srcSize; i += grid.size()) { const int bucket = srcArr[i]; if (bucket < numOfBuckets) { indicesBuckets[atomicAggIncMulti(bucket, bucketCounters)] = i; } } #endif } int mapIndicesToBuckets(int *h_srcArr, int *d_srcArr, int numOfBuckets) { int *d_indicesBuckets, *d_bucketCounters; int *cpuBucketCounters = new int[numOfBuckets]; int *h_bucketCounters = new int[numOfBuckets]; memset(cpuBucketCounters, 0, sizeof(int)*numOfBuckets); // Initialize each bucket counters. for (int i = 0; i < numOfBuckets; i++) { h_bucketCounters[i] = i*NUM_ELEMS; } checkCudaErrors(cudaMalloc(&d_indicesBuckets, sizeof(int) * NUM_ELEMS * numOfBuckets)); checkCudaErrors(cudaMalloc(&d_bucketCounters, sizeof(int) * numOfBuckets)); checkCudaErrors(cudaMemcpy(d_bucketCounters, h_bucketCounters, sizeof(int)*numOfBuckets, cudaMemcpyHostToDevice)); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK), 1, 1); mapToBuckets<<<dimGrid, dimBlock>>>(d_srcArr, d_indicesBuckets, d_bucketCounters, NUM_ELEMS, numOfBuckets); checkCudaErrors(cudaMemcpy(h_bucketCounters, d_bucketCounters, sizeof(int)*numOfBuckets, cudaMemcpyDeviceToHost)); for (int i=0; i < NUM_ELEMS; i++) { cpuBucketCounters[h_srcArr[i]]++; } bool allMatch = true; int finalElems = 0; for (int i=0; i < numOfBuckets; i++) { finalElems += (h_bucketCounters[i] - i*NUM_ELEMS); if (cpuBucketCounters[i] != (h_bucketCounters[i] - i*NUM_ELEMS)) { allMatch = false; break; } } if (!allMatch && finalElems != NUM_ELEMS) { return EXIT_FAILURE; } return EXIT_SUCCESS; } // Warp-aggregated atomic Max in multi bucket #if __CUDA_ARCH__ >= 700 __device__ void atomicAggMaxMulti(const int bucket, int *counter, const int valueForMax) { cg::coalesced_group active = cg::coalesced_threads(); // group 
all threads with same bucket value. auto labeledGroup = cg::labeled_partition(active, bucket); const int maxValueInGroup = cg::reduce(labeledGroup, valueForMax, cg::greater<int>()); if (labeledGroup.thread_rank() == 0) { atomicMax(&counter[bucket], maxValueInGroup); } } #endif // Performs max calculation in each buckets. __global__ void calculateMaxInEachBuckets(const int *srcArr, const int *valueInBuckets, int *bucketsMax, const int srcSize, const int numOfBuckets) { #if __CUDA_ARCH__ >= 700 cg::grid_group grid = cg::this_grid(); for (int i=grid.thread_rank(); i < srcSize; i += grid.size()) { const int bucket = srcArr[i]; if (bucket < numOfBuckets) { atomicAggMaxMulti(bucket, bucketsMax, valueInBuckets[i]); } } #endif } int calculateMaxInBuckets(int *h_srcArr, int *d_srcArr, int numOfBuckets) { int *d_valueInBuckets, *d_bucketsMax; int *h_valueInBuckets = new int[NUM_ELEMS]; int *cpuBucketsMax = new int[numOfBuckets]; int *h_bucketsMax = new int[numOfBuckets]; memset(cpuBucketsMax, 0, sizeof(int) * numOfBuckets); // Here we create values which is assumed to correspond to each // buckets of srcArr at same array index. 
for (int i=0; i < NUM_ELEMS; i++) { h_valueInBuckets[i] = rand(); } checkCudaErrors(cudaMalloc(&d_valueInBuckets, sizeof(int) * NUM_ELEMS)); checkCudaErrors(cudaMalloc(&d_bucketsMax, sizeof(int) * numOfBuckets)); checkCudaErrors(cudaMemset(d_bucketsMax, 0, sizeof(int) * numOfBuckets)); checkCudaErrors(cudaMemcpy(d_valueInBuckets, h_valueInBuckets, sizeof(int) * NUM_ELEMS, cudaMemcpyHostToDevice)); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK), 1, 1); calculateMaxInEachBuckets<<<dimGrid, dimBlock>>>(d_srcArr, d_valueInBuckets, d_bucketsMax, NUM_ELEMS, numOfBuckets); checkCudaErrors(cudaMemcpy(h_bucketsMax, d_bucketsMax, sizeof(int) * numOfBuckets, cudaMemcpyDeviceToHost)); for (int i = 0; i < NUM_ELEMS; i++) { if (cpuBucketsMax[h_srcArr[i]] < h_valueInBuckets[i]) { cpuBucketsMax[h_srcArr[i]] = h_valueInBuckets[i]; } } bool allMatch = true; int finalElems = 0; for (int i=0; i < numOfBuckets; i++) { if (cpuBucketsMax[i] != h_bucketsMax[i]) { allMatch = false; printf("CPU i=%d max = %d mismatches GPU max = %d\n", i, cpuBucketsMax[i], h_bucketsMax[i]); break; } } if (allMatch) { printf("CPU max matches GPU max\n"); } delete[] h_valueInBuckets; delete[] cpuBucketsMax; delete[] h_bucketsMax; checkCudaErrors(cudaFree(d_valueInBuckets)); checkCudaErrors(cudaFree(d_bucketsMax)); if (!allMatch && finalElems != NUM_ELEMS) { return EXIT_FAILURE; } return EXIT_SUCCESS; } int main(int argc, char **argv) { int *data_to_filter, *filtered_data, nres = 0; int *d_data_to_filter, *d_filtered_data, *d_nres; int numOfBuckets = 5; data_to_filter = reinterpret_cast<int *>(malloc(sizeof(int) * NUM_ELEMS)); // Generate input data. 
for (int i = 0; i < NUM_ELEMS; i++) { data_to_filter[i] = rand() % numOfBuckets; } int devId = findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaMalloc(&d_data_to_filter, sizeof(int) * NUM_ELEMS)); checkCudaErrors(cudaMalloc(&d_filtered_data, sizeof(int) * NUM_ELEMS)); checkCudaErrors(cudaMalloc(&d_nres, sizeof(int))); checkCudaErrors(cudaMemcpy(d_data_to_filter, data_to_filter, sizeof(int) * NUM_ELEMS, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_nres, 0, sizeof(int))); dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1); dim3 dimGrid((NUM_ELEMS / NUM_THREADS_PER_BLOCK) + 1, 1, 1); filter_arr<<<dimGrid, dimBlock>>>(d_filtered_data, d_nres, d_data_to_filter, NUM_ELEMS); checkCudaErrors( cudaMemcpy(&nres, d_nres, sizeof(int), cudaMemcpyDeviceToHost)); filtered_data = reinterpret_cast<int *>(malloc(sizeof(int) * nres)); checkCudaErrors(cudaMemcpy(filtered_data, d_filtered_data, sizeof(int) * nres, cudaMemcpyDeviceToHost)); int *host_filtered_data = reinterpret_cast<int *>(malloc(sizeof(int) * NUM_ELEMS)); // Generate host output with host filtering code. int host_flt_count = 0; for (int i = 0; i < NUM_ELEMS; i++) { if (data_to_filter[i] > 0) { host_filtered_data[host_flt_count++] = data_to_filter[i]; } } int major = 0; checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, devId)); int mapIndicesToBucketsStatus = EXIT_SUCCESS; int calculateMaxInBucketsStatus = EXIT_SUCCESS; // atomicAggIncMulti & atomicAggMaxMulti require a GPU of Volta (SM7X) architecture or higher, // so that it can take advantage of the new MATCH capability of Volta hardware if (major >= 7) { mapIndicesToBucketsStatus = mapIndicesToBuckets(data_to_filter, d_data_to_filter, numOfBuckets); calculateMaxInBucketsStatus = calculateMaxInBuckets(data_to_filter, d_data_to_filter, numOfBuckets); } printf("\nWarp Aggregated Atomics %s \n", (host_flt_count == nres) && (mapIndicesToBucketsStatus == EXIT_SUCCESS) && (calculateMaxInBucketsStatus == EXIT_SUCCESS) ? 
"PASSED" : "FAILED"); checkCudaErrors(cudaFree(d_data_to_filter)); checkCudaErrors(cudaFree(d_filtered_data)); checkCudaErrors(cudaFree(d_nres)); free(data_to_filter); free(filtered_data); free(host_filtered_data); }
4a2792789c35b520133566bb0d416222b5320ce2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #define N 100 #define ITERS 5 __global__ void stencil(float* a, float* b) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * N; float update = 0.0; if (y > 0) { update += a[(y-1)*N+x]; } if (y < N-1) { update += a[(y+1)*N+x]; } if (x > 0) { update += a[y*N+(x-1)]; } if (x < N-1) { update += a[y*N+(x+1)]; } b[offset] = update / 4.0; } __global__ void copy(float* to, float* from) { int offset = blockIdx.x + blockIdx.y * N; to[offset] = from[offset]; } int main() { float a[N*N], b[N*N]; float *dev_a, *dev_b; dim3 blocks(N, N); hipMalloc((void**)&dev_a, N*N*sizeof(float)); hipMalloc((void**)&dev_b, N*N*sizeof(float)); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { a[i*N+j] = static_cast<float>(i+j); } } hipMemcpy(dev_a, a, N*N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, N*N*sizeof(float), hipMemcpyHostToDevice); for (int num_it = 0; num_it < ITERS; num_it++) { hipLaunchKernelGGL(( stencil), dim3(blocks), dim3(1), 0, 0, dev_a, dev_b); hipLaunchKernelGGL(( copy), dim3(blocks), dim3(1), 0, 0, dev_a, dev_b); } hipMemcpy(b, dev_b, N*N*sizeof(float), hipMemcpyDeviceToHost); // print out the new array b std::cout << std::endl; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { } } std::cout << std::endl; // find sum float sum = 0.0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { sum += b[i*N+j]; std::cout << b[i*N+j] << " "; } std::cout << std::endl; } std::cout << "sum is " << sum << std::endl; hipFree(dev_a); hipFree(dev_b); }
4a2792789c35b520133566bb0d416222b5320ce2.cu
#include <iostream> #include <stdio.h> #define N 100 #define ITERS 5 __global__ void stencil(float* a, float* b) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * N; float update = 0.0; if (y > 0) { update += a[(y-1)*N+x]; } if (y < N-1) { update += a[(y+1)*N+x]; } if (x > 0) { update += a[y*N+(x-1)]; } if (x < N-1) { update += a[y*N+(x+1)]; } b[offset] = update / 4.0; } __global__ void copy(float* to, float* from) { int offset = blockIdx.x + blockIdx.y * N; to[offset] = from[offset]; } int main() { float a[N*N], b[N*N]; float *dev_a, *dev_b; dim3 blocks(N, N); cudaMalloc((void**)&dev_a, N*N*sizeof(float)); cudaMalloc((void**)&dev_b, N*N*sizeof(float)); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { a[i*N+j] = static_cast<float>(i+j); } } cudaMemcpy(dev_a, a, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, N*N*sizeof(float), cudaMemcpyHostToDevice); for (int num_it = 0; num_it < ITERS; num_it++) { stencil<<<blocks, 1>>>(dev_a, dev_b); copy<<<blocks, 1>>>(dev_a, dev_b); } cudaMemcpy(b, dev_b, N*N*sizeof(float), cudaMemcpyDeviceToHost); // print out the new array b std::cout << std::endl; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { } } std::cout << std::endl; // find sum float sum = 0.0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { sum += b[i*N+j]; std::cout << b[i*N+j] << " "; } std::cout << std::endl; } std::cout << "sum is " << sum << std::endl; cudaFree(dev_a); cudaFree(dev_b); }
40a221945fb2e6e471d771ab383a5e9181c52df9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <helper_cuda.h> #include <helper_functions.h> #include <time.h> #define CHARGE 1 #define THREAD_SIZE_X 256 #define THREAD_SIZE_Y 32 #define THREAD_SIZE_Z 1 #define BLOCK_SIZE_X 2 #define BLOCK_SIZE_Y 2 #define BLOCK_SIZE_Z 1 unsigned int list_num; __global__ void prime_cal( unsigned int *prime_result){ /*ID*/ unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y + blockIdx.y * blockDim.x * blockDim.y * gridDim.x + blockIdx.z * blockDim.x * blockDim.y * gridDim.x * gridDim.y; unsigned int scan_idx; unsigned int dev; unsigned int flag; /**/ for ( scan_idx = id * CHARGE ; scan_idx < (id + 1) * CHARGE; scan_idx++ ) { flag=0; if ( scan_idx == 1 ){ prime_result[scan_idx]=0; }else if ( scan_idx == 2 ){ prime_result[scan_idx]=2; }else if ( scan_idx % 2 == 0 ){ prime_result[scan_idx]=0; }else{ dev=3; while ( (dev * dev) <= scan_idx ){ if ( scan_idx % dev == 0 ){ flag=1; break; } dev+=2; } if (flag == 0){ prime_result[scan_idx]=scan_idx; }else if (flag == 1){ prime_result[scan_idx]=0; } } } __syncthreads(); } /* timer */ int timer(void){ time_t now = time(NULL); struct tm *pnow = localtime(&now); char buff[128]=""; sprintf(buff,"%d:%d:%d",pnow->tm_hour,pnow->tm_min,pnow->tm_sec); printf("%s\n",buff); return 0; } int main(int argc, char** argv){ FILE *outputfile; outputfile = fopen("./prime_data_cuda.txt", "w"); if (outputfile == NULL) { printf("cannot open\n"); exit(1); } timer(); /**/ list_num=CHARGE * THREAD_SIZE_X * THREAD_SIZE_Y * THREAD_SIZE_Z * BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; /**/ unsigned int *host_result; /**/ unsigned int *prime_result; /**/ checkCudaErrors(hipMalloc((void**)&prime_result, list_num * sizeof(unsigned int) )); /**/ dim3 threads(THREAD_SIZE_X,THREAD_SIZE_Y,THREAD_SIZE_Z); dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); /**/ hipEvent_t start; 
hipEvent_t stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start, NULL)); /**/ hipLaunchKernelGGL(( prime_cal), dim3(blocks) , dim3(threads), 0, 0, prime_result); hipDeviceSynchronize(); /**/ checkCudaErrors(hipEventRecord(stop, NULL)); checkCudaErrors(hipEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); printf("Processing time: %f (msec)\n", msecTotal); /**/ checkCudaErrors(hipEventRecord(start, NULL)); /**/ host_result = (unsigned int*)malloc( list_num * sizeof(unsigned int) ); checkCudaErrors(hipMemcpy(host_result, prime_result, list_num * sizeof(unsigned int) , hipMemcpyDeviceToHost)); /**/ checkCudaErrors(hipEventRecord(stop, NULL)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); printf("Memory copy time: %f (msec)\n", msecTotal); printf("Now Writing...\n"); for(int l=0; l < list_num ; l++){ if ( host_result[l] != 0 ){ fprintf(outputfile,"%u\n",host_result[l]); } } fclose(outputfile); /**/ free(host_result); checkCudaErrors(hipFree(prime_result)); timer(); /**/ hipDeviceReset(); return 0; }
40a221945fb2e6e471d771ab383a5e9181c52df9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <helper_cuda.h> #include <helper_functions.h> #include <time.h> #define CHARGE 1 #define THREAD_SIZE_X 256 #define THREAD_SIZE_Y 32 #define THREAD_SIZE_Z 1 #define BLOCK_SIZE_X 2 #define BLOCK_SIZE_Y 2 #define BLOCK_SIZE_Z 1 unsigned int list_num; __global__ void prime_cal( unsigned int *prime_result){ /*スレッドIDの割り当て*/ unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y + blockIdx.y * blockDim.x * blockDim.y * gridDim.x + blockIdx.z * blockDim.x * blockDim.y * gridDim.x * gridDim.y; unsigned int scan_idx; unsigned int dev; unsigned int flag; /*素数判定を行う*/ for ( scan_idx = id * CHARGE ; scan_idx < (id + 1) * CHARGE; scan_idx++ ) { flag=0; if ( scan_idx == 1 ){ prime_result[scan_idx]=0; }else if ( scan_idx == 2 ){ prime_result[scan_idx]=2; }else if ( scan_idx % 2 == 0 ){ prime_result[scan_idx]=0; }else{ dev=3; while ( (dev * dev) <= scan_idx ){ if ( scan_idx % dev == 0 ){ flag=1; break; } dev+=2; } if (flag == 0){ prime_result[scan_idx]=scan_idx; }else if (flag == 1){ prime_result[scan_idx]=0; } } } __syncthreads(); } /* timer */ int timer(void){ time_t now = time(NULL); struct tm *pnow = localtime(&now); char buff[128]=""; sprintf(buff,"%d:%d:%d",pnow->tm_hour,pnow->tm_min,pnow->tm_sec); printf("%s\n",buff); return 0; } int main(int argc, char** argv){ FILE *outputfile; outputfile = fopen("./prime_data_cuda.txt", "w"); if (outputfile == NULL) { printf("cannot open\n"); exit(1); } timer(); /*素数チェックする範囲を定義*/ list_num=CHARGE * THREAD_SIZE_X * THREAD_SIZE_Y * THREAD_SIZE_Z * BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z; /*ホスト側の変数設定*/ unsigned int *host_result; /*デバイス側の変数設定*/ unsigned int *prime_result; /*デバイスメモリ領域の確保*/ checkCudaErrors(cudaMalloc((void**)&prime_result, list_num * sizeof(unsigned int) )); /*ブロックサイズとグリッドサイズの設定*/ dim3 threads(THREAD_SIZE_X,THREAD_SIZE_Y,THREAD_SIZE_Z); dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); /*タイマーを作成して計測開始*/ cudaEvent_t 
start; cudaEvent_t stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start, NULL)); /*カーネルの起動*/ prime_cal<<<blocks , threads>>>(prime_result); cudaThreadSynchronize(); /*タイマーを停止しかかった時間を表示*/ checkCudaErrors(cudaEventRecord(stop, NULL)); checkCudaErrors(cudaEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); printf("Processing time: %f (msec)\n", msecTotal); /*再度タイマー開始*/ checkCudaErrors(cudaEventRecord(start, NULL)); /*結果の領域確保とデバイス側からのメモリ転送*/ host_result = (unsigned int*)malloc( list_num * sizeof(unsigned int) ); checkCudaErrors(cudaMemcpy(host_result, prime_result, list_num * sizeof(unsigned int) , cudaMemcpyDeviceToHost)); /*タイマーを停止しかかった時間を表示*/ checkCudaErrors(cudaEventRecord(stop, NULL)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); printf("Memory copy time: %f (msec)\n", msecTotal); printf("Now Writing...\n"); for(int l=0; l < list_num ; l++){ if ( host_result[l] != 0 ){ fprintf(outputfile,"%u\n",host_result[l]); } } fclose(outputfile); /*ホスト・デバイスメモリの開放*/ free(host_result); checkCudaErrors(cudaFree(prime_result)); timer(); /*終了処理*/ cudaThreadExit(); return 0; }
d5dbf0efc74f1a2faa6b9b0d36bc81ff6ece386b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float* var_12,float var_13) { for (int i=0; i < var_1; ++i) { comp = +1.5683E-36f + (+0.0f + var_4); float tmp_1 = +1.9481E-36f; comp += tmp_1 * var_5 + (var_6 * -0.0f); comp = var_7 * (var_8 * sinhf(var_9 + (var_10 / var_11))); for (int i=0; i < var_2; ++i) { var_12[i] = +1.1261E35f; comp += var_12[i] + (+1.7149E-42f + (-0.0f + -1.1505E-35f)); } for (int i=0; i < var_3; ++i) { comp += (+1.8887E36f * var_13); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float* tmp_13 = initPointer( atof(argv[13]) ); float tmp_14 = atof(argv[14]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14); hipDeviceSynchronize(); return 0; }
d5dbf0efc74f1a2faa6b9b0d36bc81ff6ece386b.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float* var_12,float var_13) { for (int i=0; i < var_1; ++i) { comp = +1.5683E-36f + (+0.0f + var_4); float tmp_1 = +1.9481E-36f; comp += tmp_1 * var_5 + (var_6 * -0.0f); comp = var_7 * (var_8 * sinhf(var_9 + (var_10 / var_11))); for (int i=0; i < var_2; ++i) { var_12[i] = +1.1261E35f; comp += var_12[i] + (+1.7149E-42f + (-0.0f + -1.1505E-35f)); } for (int i=0; i < var_3; ++i) { comp += (+1.8887E36f * var_13); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float* tmp_13 = initPointer( atof(argv[13]) ); float tmp_14 = atof(argv[14]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14); cudaDeviceSynchronize(); return 0; }
cdf25b75b9998dedb847a5cfa26a7dd681cc0f87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmergeidr.cu normal z -> c, Tue Feb 9 16:05:43 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cidr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cidr_smoothing_1_kernel( int num_rows, int num_cols, magmaFloatComplex *drs, magmaFloatComplex *dr, magmaFloatComplex *dt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dt = drs - dr Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] drs magmaFloatComplex_ptr vector @param[in] dr magmaFloatComplex_ptr vector @param[in,out] dt magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cidr_smoothing_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr drs, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt ); return MAGMA_SUCCESS; } __global__ void magma_cidr_smoothing_2_kernel( int num_rows, int num_cols, magmaFloatComplex omega, magmaFloatComplex *dx, magmaFloatComplex *dxs ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ] - omega * dx[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dxs = dxs - gamma*(dxs-dx) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] omega magmaFloatComplex scalar @param[in] dx magmaFloatComplex_ptr vector @param[in,out] dxs magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cidr_smoothing_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex omega, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dxs, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs); return MAGMA_SUCCESS; }
cdf25b75b9998dedb847a5cfa26a7dd681cc0f87.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmergeidr.cu normal z -> c, Tue Feb 9 16:05:43 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cidr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cidr_smoothing_1_kernel( int num_rows, int num_cols, magmaFloatComplex *drs, magmaFloatComplex *dr, magmaFloatComplex *dt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dt = drs - dr Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] drs magmaFloatComplex_ptr vector @param[in] dr magmaFloatComplex_ptr vector @param[in,out] dt magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cidr_smoothing_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr drs, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt ); return MAGMA_SUCCESS; } __global__ void magma_cidr_smoothing_2_kernel( int num_rows, int num_cols, magmaFloatComplex omega, magmaFloatComplex *dx, magmaFloatComplex *dxs ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ] - omega * dx[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dxs = dxs - gamma*(dxs-dx) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] omega magmaFloatComplex scalar @param[in] dx magmaFloatComplex_ptr vector @param[in,out] dxs magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cidr_smoothing_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex omega, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dxs, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs); return MAGMA_SUCCESS; }
70481db3c30dc28df87db14944c57e126532657e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zmergecg.cu normal z -> c, Fri Jul 18 17:34:28 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "../include/magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_ccgreduce_kernel_spmv1( int Gs, int n, magmaFloatComplex *vtmp, magmaFloatComplex *vtmp2 ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_C_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_ccgmerge_spmvcsr_kernel( int n, magmaFloatComplex *d_val, magma_index_t *d_rowptr, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = d_rowptr[ i ]; int end = d_rowptr[ i+1 ]; for( j=start; j<end; j++) dot += d_val[ j ] * d[ d_colind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ 
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_ccgmerge_spmvellpackt_kernel( int n, int num_cols_per_row, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ n * k + i ]; magmaFloatComplex val = d_val [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ 
Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_ccgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ num_cols_per_row * i + k ]; magmaFloatComplex val = d_val [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if 
defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_8( int n, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_16( int n, 
magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_32( int n, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) 
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_ccgmerge_spmvellpackrt_kernel2( int n, magmaFloatComplex *z, magmaFloatComplex *d, magmaFloatComplex *vtmp2 ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ 
vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_ccgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = d_colind [offset+ blocksize * n + Idx ]; magmaFloatComplex val = d_val[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel 
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_ccg_rhokernel( magmaFloatComplex *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaFloatComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param A magma_c_sparse_matrix input matrix @param d1 magmaFloatComplex* temporary vector @param d2 magmaFloatComplex* 
temporary vector @param d_d magmaFloatComplex* input vector d @param d_z magmaFloatComplex* input vector z @param skp magmaFloatComplex* array for parameters ( skp[3]=rho ) @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_spmv1( magma_c_sparse_matrix A, magmaFloatComplex *d1, magmaFloatComplex *d2, magmaFloatComplex *d_d, magmaFloatComplex *d_z, magmaFloatComplex *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex *aux1 = d1, *aux2 = d2; int b = 1; if( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_ccgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELLPACK ) hipLaunchKernelGGL(( magma_ccgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackt_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_SELLC || A.storage_type == Magma_SELLP ){ if( A.blocksize==256){ hipLaunchKernelGGL(( magma_ccgmerge_spmvsellc_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.blocksize, A. 
val, A.col, A.row, d_d, d_z, d1 ); } else printf("error: SELLC only for blocksize 256.\n"); } else if( A.storage_type == Magma_SELLP ){ int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaFloatComplex ); if( A.alignment == 8) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 16) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 32) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, d_z, d_d, d1 ); } else if( A.storage_type == Magma_ELLRT ){ // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( A.alignment == 32 ){ hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 16 ){ hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 8 ){ hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", A.alignment); exit(-1); } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, d_z, d_d, d1 ); } while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_ccgmerge_xrbeta_kernel( int n, magmaFloatComplex *x, magmaFloatComplex *r, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *skp, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; 
temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_ccg_alphabetakernel( magmaFloatComplex *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaFloatComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_C_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_ccg_d_kernel( int n, magmaFloatComplex *skp, magmaFloatComplex *r, magmaFloatComplex *d ){ int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param n int dimension n @param d1 magmaFloatComplex* temporary vector @param d2 magmaFloatComplex* temporary vector @param d_x magmaFloatComplex* input vector x @param d_r magmaFloatComplex* input/output vector r @param d_d magmaFloatComplex* input vector d @param d_z magmaFloatComplex* input vector z @param skp magmaFloatComplex* array for parameters @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_xrbeta( int n, magmaFloatComplex *d1, magmaFloatComplex *d2, magmaFloatComplex *d_x, magmaFloatComplex *d_r, magmaFloatComplex *d_d, magmaFloatComplex *d_z, magmaFloatComplex *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaFloatComplex ); 
magmaFloatComplex *aux1 = d1, *aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_ccgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0, n, d_x, d_r, d_d, d_z, skp, d1); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, d_r, d_d ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
70481db3c30dc28df87db14944c57e126532657e.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zmergecg.cu normal z -> c, Fri Jul 18 17:34:28 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "../include/magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_ccgreduce_kernel_spmv1( int Gs, int n, magmaFloatComplex *vtmp, magmaFloatComplex *vtmp2 ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_C_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ 
Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_ccgmerge_spmvcsr_kernel( int n, magmaFloatComplex *d_val, magma_index_t *d_rowptr, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ){ magmaFloatComplex dot = MAGMA_C_ZERO; int start = d_rowptr[ i ]; int end = d_rowptr[ i+1 ]; for( j=start; j<end; j++) dot += d_val[ j ] * d[ d_colind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void 
magma_ccgmerge_spmvellpackt_kernel( int n, int num_cols_per_row, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ n * k + i ]; magmaFloatComplex val = d_val [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_ccgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d, magmaFloatComplex *z, 
magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ num_cols_per_row * i + k ]; magmaFloatComplex val = d_val [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_8( int n, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + 
threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_16( int n, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ 
shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_32( int n, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_ccgmerge_spmvellpackrt_kernel2( int n, magmaFloatComplex *z, magmaFloatComplex *d, magmaFloatComplex *vtmp2 ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? 
z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_ccgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *vtmp){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = d_colind [offset+ blocksize * n + Idx ]; magmaFloatComplex val = d_val[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( 
Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, magmaFloatComplex *d, magmaFloatComplex *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ magmaFloatComplex val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_ccg_rhokernel( magmaFloatComplex *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaFloatComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param A magma_c_sparse_matrix input matrix @param d1 magmaFloatComplex* temporary vector @param d2 magmaFloatComplex* 
temporary vector @param d_d magmaFloatComplex* input vector d @param d_z magmaFloatComplex* input vector z @param skp magmaFloatComplex* array for parameters ( skp[3]=rho ) @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_spmv1( magma_c_sparse_matrix A, magmaFloatComplex *d1, magmaFloatComplex *d2, magmaFloatComplex *d_d, magmaFloatComplex *d_z, magmaFloatComplex *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex *aux1 = d1, *aux2 = d2; int b = 1; if( A.storage_type == Magma_CSR ) magma_ccgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELLPACK ) magma_ccgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELL ) magma_ccgmerge_spmvellpackt_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_SELLC || A.storage_type == Magma_SELLP ){ if( A.blocksize==256){ magma_ccgmerge_spmvsellc_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.blocksize, A. 
val, A.col, A.row, d_d, d_z, d1 ); } else printf("error: SELLC only for blocksize 256.\n"); } else if( A.storage_type == Magma_SELLP ){ int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaFloatComplex ); if( A.alignment == 8) magma_ccgmerge_spmvsellpt_kernel_8 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 16) magma_ccgmerge_spmvsellpt_kernel_16 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 32) magma_ccgmerge_spmvsellpt_kernel_32 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, d_z, d_d, d1 ); } else if( A.storage_type == Magma_ELLRT ){ // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( A.alignment == 32 ){ magma_ccgmerge_spmvellpackrt_kernel_32 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 16 ){ magma_ccgmerge_spmvellpackrt_kernel_16 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 8 ){ magma_ccgmerge_spmvellpackrt_kernel_8 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", A.alignment); exit(-1); } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, d_z, d_d, d1 ); } while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_rhokernel<<<Gs2, Bs2, 0>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_ccgmerge_xrbeta_kernel( int n, magmaFloatComplex *x, magmaFloatComplex *r, magmaFloatComplex *d, magmaFloatComplex *z, magmaFloatComplex *skp, magmaFloatComplex *vtmp ){ extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 
1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_ccg_alphabetakernel( magmaFloatComplex *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaFloatComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_C_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_ccg_d_kernel( int n, magmaFloatComplex *skp, magmaFloatComplex *r, magmaFloatComplex *d ){ int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param n int dimension n @param d1 magmaFloatComplex* temporary vector @param d2 magmaFloatComplex* temporary vector @param d_x magmaFloatComplex* input vector x @param d_r magmaFloatComplex* input/output vector r @param d_d magmaFloatComplex* input vector d @param d_z magmaFloatComplex* input vector z @param skp magmaFloatComplex* array for parameters @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_xrbeta( int n, magmaFloatComplex *d1, magmaFloatComplex *d2, magmaFloatComplex *d_x, magmaFloatComplex *d_r, magmaFloatComplex *d_d, magmaFloatComplex *d_z, magmaFloatComplex *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex *aux1 = d1, *aux2 = d2; int b = 1; magma_ccgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>> 
( n, d_x, d_r, d_d, d_z, skp, d1); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); magma_ccg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, d_r, d_d ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
8d1f09e0ad03b70fc68a62dad5940b24db418a19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sort.h" #include "utils.h" #define MAX_BLOCK_SZ 128 extern "C" __global__ void gpu_radix_sort_local(unsigned int* d_out_sorted, unsigned int* d_prefix_sums, unsigned int* d_block_sums, unsigned int input_shift_width, unsigned int* d_in, unsigned int d_in_len, unsigned int max_elems_per_block, unsigned int* d_idx_in, unsigned int* d_idx_out) { // need shared memory array for: // - block's share of the input data (local sort will be put here too) // - mask outputs // - scanned mask outputs // - merged scaned mask outputs ("local prefix sum") // - local sums of scanned mask outputs // - scanned local sums of scanned mask outputs // for all radix combinations: // build mask output for current radix combination // scan mask ouput // store needed value from current prefix sum array to merged prefix sum array // store total sum of mask output (obtained from scan) to global block sum array // calculate local sorted address from local prefix sum and scanned mask output's total sums // shuffle input block according to calculated local sorted addresses // shuffle local prefix sums according to calculated local sorted addresses // copy locally sorted array back to global memory // copy local prefix sum array back to global memory extern __shared__ unsigned int shmem[]; unsigned int* s_data = shmem; // s_mask_out[] will be scanned in place unsigned int s_mask_out_len = max_elems_per_block + 1; unsigned int* s_mask_out = &s_data[max_elems_per_block]; unsigned int* s_merged_scan_mask_out = &s_mask_out[s_mask_out_len]; unsigned int* s_mask_out_sums = &s_merged_scan_mask_out[max_elems_per_block]; unsigned int* s_scan_mask_out_sums = &s_mask_out_sums[4]; unsigned int thid = threadIdx.x; // Copy block's portion of global input data to shared memory unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid; if (cpy_idx < d_in_len) s_data[thid] = d_in[cpy_idx]; else s_data[thid] = 0; 
__syncthreads(); // To extract the correct 2 bits, we first shift the number // to the right until the correct 2 bits are in the 2 LSBs, // then mask on the number with 11 (3) to remove the bits // on the left unsigned int t_data = s_data[thid]; unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3; for (unsigned int i = 0; i < 4; ++i) { // Zero out s_mask_out s_mask_out[thid] = 0; if (thid == 0) s_mask_out[s_mask_out_len - 1] = 0; __syncthreads(); // build bit mask output bool val_equals_i = false; if (cpy_idx < d_in_len) { val_equals_i = t_2bit_extract == i; s_mask_out[thid] = val_equals_i; } __syncthreads(); // Scan mask outputs (Hillis-Steele) int partner = 0; unsigned int sum = 0; unsigned int max_steps = (unsigned int) log2f(max_elems_per_block); for (unsigned int d = 0; d < max_steps; d++) { partner = thid - (1 << d); if (partner >= 0) { sum = s_mask_out[thid] + s_mask_out[partner]; } else { sum = s_mask_out[thid]; } __syncthreads(); s_mask_out[thid] = sum; __syncthreads(); } // Shift elements to produce the same effect as exclusive scan unsigned int cpy_val = 0; cpy_val = s_mask_out[thid]; __syncthreads(); s_mask_out[thid + 1] = cpy_val; __syncthreads(); if (thid == 0) { // Zero out first element to produce the same effect as exclusive scan s_mask_out[0] = 0; unsigned int total_sum = s_mask_out[s_mask_out_len - 1]; s_mask_out_sums[i] = total_sum; d_block_sums[i * gridDim.x + blockIdx.x] = total_sum; } __syncthreads(); if (val_equals_i && (cpy_idx < d_in_len)) { s_merged_scan_mask_out[thid] = s_mask_out[thid]; } __syncthreads(); } // Scan mask output sums // Just do a naive scan since the array is really small if (thid == 0) { unsigned int run_sum = 0; for (unsigned int i = 0; i < 4; ++i) { s_scan_mask_out_sums[i] = run_sum; run_sum += s_mask_out_sums[i]; } } __syncthreads(); if (cpy_idx < d_in_len) { // Calculate the new indices of the input elements for sorting unsigned int t_prefix_sum = s_merged_scan_mask_out[thid]; unsigned int new_pos = 
t_prefix_sum + s_scan_mask_out_sums[t_2bit_extract]; __syncthreads(); // Shuffle the block's input elements to actually sort them // Do this step for greater global memory transfer coalescing // in next step s_data[new_pos] = t_data; s_merged_scan_mask_out[new_pos] = t_prefix_sum; __syncthreads(); // Copy block - wise prefix sum results to global memory // Copy block-wise sort results to global d_prefix_sums[cpy_idx] = s_merged_scan_mask_out[thid]; d_out_sorted[cpy_idx] = s_data[thid]; if (d_idx_out != NULL) d_idx_out[max_elems_per_block * blockIdx.x + new_pos] = d_idx_in[cpy_idx]; } } extern "C" __global__ void gpu_glbl_shuffle(unsigned int* d_out, unsigned int* d_in, unsigned int* d_scan_block_sums, unsigned int* d_prefix_sums, unsigned int input_shift_width, unsigned int d_in_len, unsigned int max_elems_per_block, unsigned int* d_idx_in, unsigned int* d_idx_out) { // get d = digit // get n = blockIdx // get m = local prefix sum array value // calculate global position = P_d[n] + m // copy input element to final position in d_out unsigned int thid = threadIdx.x; unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid; if (cpy_idx < d_in_len) { unsigned int t_data = d_in[cpy_idx]; unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3; unsigned int t_prefix_sum = d_prefix_sums[cpy_idx]; unsigned int data_glbl_pos = d_scan_block_sums[t_2bit_extract * gridDim.x + blockIdx.x] + t_prefix_sum; __syncthreads(); d_out[data_glbl_pos] = t_data; if (d_idx_out != NULL) { d_idx_out[data_glbl_pos] = d_idx_in[cpy_idx]; } } } extern "C" __global__ void float_flip(unsigned int* d_out, float* d_in, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { unsigned int d = reinterpret_cast<unsigned int&>(d_in[i]); unsigned int mask = -(unsigned int)(d >> 31) | 0x80000000; d_out[i] = d ^ mask; } } extern "C" __global__ void inverse_float_flip(float* d_out, unsigned int* 
d_in, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { unsigned int mask = ((d_in[i] >> 31) - 1) | 0x80000000; unsigned int f = d_in[i] ^ mask; d_out[i] = reinterpret_cast<float&>(f); } } extern "C" __global__ void init_idx(unsigned int* d_idx_out, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { d_idx_out[i] = i; } } // An attempt at the gpu radix sort variant described in this paper: // https://vgc.poly.edu/~csilva/papers/cgf.pdf void radix_sort(float* const d_f_out, unsigned int* const d_idx_out, float* const d_f_in, unsigned int d_in_len) { unsigned int block_sz = MAX_BLOCK_SZ; unsigned int max_elems_per_block = block_sz; unsigned int grid_sz = d_in_len / max_elems_per_block; // Take advantage of the fact that integer division drops the decimals if (d_in_len % max_elems_per_block != 0) grid_sz += 1; unsigned int* d_in = reinterpret_cast<unsigned int*>(d_f_in); unsigned int* d_out = reinterpret_cast<unsigned int*>(d_f_out); hipLaunchKernelGGL(( float_flip), dim3(grid_sz), dim3(block_sz), 0, 0, d_in, d_f_in, d_in_len); unsigned int* d_idx_in = NULL; if (d_idx_out != NULL) { checkCudaErrors(hipMalloc(&d_idx_in, sizeof(unsigned int) * d_in_len)); hipLaunchKernelGGL(( init_idx), dim3(grid_sz), dim3(block_sz), 0, 0, d_idx_in, d_in_len); } unsigned int* d_prefix_sums; unsigned int d_prefix_sums_len = d_in_len; checkCudaErrors(hipMalloc(&d_prefix_sums, sizeof(unsigned int) * d_prefix_sums_len)); checkCudaErrors(hipMemset(d_prefix_sums, 0, sizeof(unsigned int) * d_prefix_sums_len)); unsigned int* d_block_sums; unsigned int d_block_sums_len = 4 * grid_sz; // 4-way split checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * d_block_sums_len)); checkCudaErrors(hipMemset(d_block_sums, 0, sizeof(unsigned int) * d_block_sums_len)); unsigned int* 
d_scan_block_sums; checkCudaErrors(hipMalloc(&d_scan_block_sums, sizeof(unsigned int) * d_block_sums_len)); checkCudaErrors(hipMemset(d_scan_block_sums, 0, sizeof(unsigned int) * d_block_sums_len)); // shared memory consists of 3 arrays the size of the block-wise input // and 2 arrays the size of n in the current n-way split (4) unsigned int s_data_len = max_elems_per_block; unsigned int s_mask_out_len = max_elems_per_block + 1; unsigned int s_merged_scan_mask_out_len = max_elems_per_block; unsigned int s_mask_out_sums_len = 4; // 4-way split unsigned int s_scan_mask_out_sums_len = 4; unsigned int shmem_sz = (s_data_len + s_mask_out_len + s_merged_scan_mask_out_len + s_mask_out_sums_len + s_scan_mask_out_sums_len) * sizeof(unsigned int); // for every 2 bits from LSB to MSB: // block-wise radix sort (write blocks back to global memory) for (unsigned int shift_width = 0; shift_width <= 30; shift_width += 2) { hipLaunchKernelGGL(( gpu_radix_sort_local), dim3(grid_sz), dim3(block_sz), shmem_sz, 0, d_out, d_prefix_sums, d_block_sums, shift_width, d_in, d_in_len, max_elems_per_block, d_idx_in, d_idx_out); //unsigned int* h_test = new unsigned int[d_in_len]; //checkCudaErrors(hipMemcpy(h_test, d_in, sizeof(unsigned int) * d_in_len, hipMemcpyDeviceToHost)); //for (unsigned int i = 0; i < d_in_len; ++i) // std::cout << h_test[i] << " "; //std::cout << std::endl; //delete[] h_test; // scan global block sum array sum_scan_blelloch(d_scan_block_sums, d_block_sums, d_block_sums_len); // scatter/shuffle block-wise sorted array to final positions hipLaunchKernelGGL(( gpu_glbl_shuffle), dim3(grid_sz), dim3(block_sz), 0, 0, d_in, d_out, d_scan_block_sums, d_prefix_sums, shift_width, d_in_len, max_elems_per_block, d_idx_out, d_idx_in); } checkCudaErrors(hipMemcpy(d_out, d_in, sizeof(unsigned int) * d_in_len, hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( inverse_float_flip), dim3(grid_sz), dim3(block_sz), 0, 0, d_f_out, d_out, d_in_len); if (d_idx_out != NULL) 
checkCudaErrors(hipMemcpy(d_idx_out, d_idx_in, sizeof(unsigned int) * d_in_len, hipMemcpyDeviceToDevice)); checkCudaErrors(hipFree(d_scan_block_sums)); checkCudaErrors(hipFree(d_block_sums)); checkCudaErrors(hipFree(d_prefix_sums)); }
8d1f09e0ad03b70fc68a62dad5940b24db418a19.cu
#include "sort.h" #include "utils.h" #define MAX_BLOCK_SZ 128 extern "C" __global__ void gpu_radix_sort_local(unsigned int* d_out_sorted, unsigned int* d_prefix_sums, unsigned int* d_block_sums, unsigned int input_shift_width, unsigned int* d_in, unsigned int d_in_len, unsigned int max_elems_per_block, unsigned int* d_idx_in, unsigned int* d_idx_out) { // need shared memory array for: // - block's share of the input data (local sort will be put here too) // - mask outputs // - scanned mask outputs // - merged scaned mask outputs ("local prefix sum") // - local sums of scanned mask outputs // - scanned local sums of scanned mask outputs // for all radix combinations: // build mask output for current radix combination // scan mask ouput // store needed value from current prefix sum array to merged prefix sum array // store total sum of mask output (obtained from scan) to global block sum array // calculate local sorted address from local prefix sum and scanned mask output's total sums // shuffle input block according to calculated local sorted addresses // shuffle local prefix sums according to calculated local sorted addresses // copy locally sorted array back to global memory // copy local prefix sum array back to global memory extern __shared__ unsigned int shmem[]; unsigned int* s_data = shmem; // s_mask_out[] will be scanned in place unsigned int s_mask_out_len = max_elems_per_block + 1; unsigned int* s_mask_out = &s_data[max_elems_per_block]; unsigned int* s_merged_scan_mask_out = &s_mask_out[s_mask_out_len]; unsigned int* s_mask_out_sums = &s_merged_scan_mask_out[max_elems_per_block]; unsigned int* s_scan_mask_out_sums = &s_mask_out_sums[4]; unsigned int thid = threadIdx.x; // Copy block's portion of global input data to shared memory unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid; if (cpy_idx < d_in_len) s_data[thid] = d_in[cpy_idx]; else s_data[thid] = 0; __syncthreads(); // To extract the correct 2 bits, we first shift the number // to the 
right until the correct 2 bits are in the 2 LSBs, // then mask on the number with 11 (3) to remove the bits // on the left unsigned int t_data = s_data[thid]; unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3; for (unsigned int i = 0; i < 4; ++i) { // Zero out s_mask_out s_mask_out[thid] = 0; if (thid == 0) s_mask_out[s_mask_out_len - 1] = 0; __syncthreads(); // build bit mask output bool val_equals_i = false; if (cpy_idx < d_in_len) { val_equals_i = t_2bit_extract == i; s_mask_out[thid] = val_equals_i; } __syncthreads(); // Scan mask outputs (Hillis-Steele) int partner = 0; unsigned int sum = 0; unsigned int max_steps = (unsigned int) log2f(max_elems_per_block); for (unsigned int d = 0; d < max_steps; d++) { partner = thid - (1 << d); if (partner >= 0) { sum = s_mask_out[thid] + s_mask_out[partner]; } else { sum = s_mask_out[thid]; } __syncthreads(); s_mask_out[thid] = sum; __syncthreads(); } // Shift elements to produce the same effect as exclusive scan unsigned int cpy_val = 0; cpy_val = s_mask_out[thid]; __syncthreads(); s_mask_out[thid + 1] = cpy_val; __syncthreads(); if (thid == 0) { // Zero out first element to produce the same effect as exclusive scan s_mask_out[0] = 0; unsigned int total_sum = s_mask_out[s_mask_out_len - 1]; s_mask_out_sums[i] = total_sum; d_block_sums[i * gridDim.x + blockIdx.x] = total_sum; } __syncthreads(); if (val_equals_i && (cpy_idx < d_in_len)) { s_merged_scan_mask_out[thid] = s_mask_out[thid]; } __syncthreads(); } // Scan mask output sums // Just do a naive scan since the array is really small if (thid == 0) { unsigned int run_sum = 0; for (unsigned int i = 0; i < 4; ++i) { s_scan_mask_out_sums[i] = run_sum; run_sum += s_mask_out_sums[i]; } } __syncthreads(); if (cpy_idx < d_in_len) { // Calculate the new indices of the input elements for sorting unsigned int t_prefix_sum = s_merged_scan_mask_out[thid]; unsigned int new_pos = t_prefix_sum + s_scan_mask_out_sums[t_2bit_extract]; __syncthreads(); // Shuffle the block's 
input elements to actually sort them // Do this step for greater global memory transfer coalescing // in next step s_data[new_pos] = t_data; s_merged_scan_mask_out[new_pos] = t_prefix_sum; __syncthreads(); // Copy block - wise prefix sum results to global memory // Copy block-wise sort results to global d_prefix_sums[cpy_idx] = s_merged_scan_mask_out[thid]; d_out_sorted[cpy_idx] = s_data[thid]; if (d_idx_out != NULL) d_idx_out[max_elems_per_block * blockIdx.x + new_pos] = d_idx_in[cpy_idx]; } } extern "C" __global__ void gpu_glbl_shuffle(unsigned int* d_out, unsigned int* d_in, unsigned int* d_scan_block_sums, unsigned int* d_prefix_sums, unsigned int input_shift_width, unsigned int d_in_len, unsigned int max_elems_per_block, unsigned int* d_idx_in, unsigned int* d_idx_out) { // get d = digit // get n = blockIdx // get m = local prefix sum array value // calculate global position = P_d[n] + m // copy input element to final position in d_out unsigned int thid = threadIdx.x; unsigned int cpy_idx = max_elems_per_block * blockIdx.x + thid; if (cpy_idx < d_in_len) { unsigned int t_data = d_in[cpy_idx]; unsigned int t_2bit_extract = (t_data >> input_shift_width) & 3; unsigned int t_prefix_sum = d_prefix_sums[cpy_idx]; unsigned int data_glbl_pos = d_scan_block_sums[t_2bit_extract * gridDim.x + blockIdx.x] + t_prefix_sum; __syncthreads(); d_out[data_glbl_pos] = t_data; if (d_idx_out != NULL) { d_idx_out[data_glbl_pos] = d_idx_in[cpy_idx]; } } } extern "C" __global__ void float_flip(unsigned int* d_out, float* d_in, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { unsigned int d = reinterpret_cast<unsigned int&>(d_in[i]); unsigned int mask = -(unsigned int)(d >> 31) | 0x80000000; d_out[i] = d ^ mask; } } extern "C" __global__ void inverse_float_flip(float* d_out, unsigned int* d_in, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = 
blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { unsigned int mask = ((d_in[i] >> 31) - 1) | 0x80000000; unsigned int f = d_in[i] ^ mask; d_out[i] = reinterpret_cast<float&>(f); } } extern "C" __global__ void init_idx(unsigned int* d_idx_out, unsigned int d_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < d_len; i += stride) { d_idx_out[i] = i; } } // An attempt at the gpu radix sort variant described in this paper: // https://vgc.poly.edu/~csilva/papers/cgf.pdf void radix_sort(float* const d_f_out, unsigned int* const d_idx_out, float* const d_f_in, unsigned int d_in_len) { unsigned int block_sz = MAX_BLOCK_SZ; unsigned int max_elems_per_block = block_sz; unsigned int grid_sz = d_in_len / max_elems_per_block; // Take advantage of the fact that integer division drops the decimals if (d_in_len % max_elems_per_block != 0) grid_sz += 1; unsigned int* d_in = reinterpret_cast<unsigned int*>(d_f_in); unsigned int* d_out = reinterpret_cast<unsigned int*>(d_f_out); float_flip<<<grid_sz, block_sz>>>(d_in, d_f_in, d_in_len); unsigned int* d_idx_in = NULL; if (d_idx_out != NULL) { checkCudaErrors(cudaMalloc(&d_idx_in, sizeof(unsigned int) * d_in_len)); init_idx<<<grid_sz, block_sz>>>(d_idx_in, d_in_len); } unsigned int* d_prefix_sums; unsigned int d_prefix_sums_len = d_in_len; checkCudaErrors(cudaMalloc(&d_prefix_sums, sizeof(unsigned int) * d_prefix_sums_len)); checkCudaErrors(cudaMemset(d_prefix_sums, 0, sizeof(unsigned int) * d_prefix_sums_len)); unsigned int* d_block_sums; unsigned int d_block_sums_len = 4 * grid_sz; // 4-way split checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * d_block_sums_len)); checkCudaErrors(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * d_block_sums_len)); unsigned int* d_scan_block_sums; checkCudaErrors(cudaMalloc(&d_scan_block_sums, sizeof(unsigned int) * d_block_sums_len)); checkCudaErrors(cudaMemset(d_scan_block_sums, 0, 
sizeof(unsigned int) * d_block_sums_len)); // shared memory consists of 3 arrays the size of the block-wise input // and 2 arrays the size of n in the current n-way split (4) unsigned int s_data_len = max_elems_per_block; unsigned int s_mask_out_len = max_elems_per_block + 1; unsigned int s_merged_scan_mask_out_len = max_elems_per_block; unsigned int s_mask_out_sums_len = 4; // 4-way split unsigned int s_scan_mask_out_sums_len = 4; unsigned int shmem_sz = (s_data_len + s_mask_out_len + s_merged_scan_mask_out_len + s_mask_out_sums_len + s_scan_mask_out_sums_len) * sizeof(unsigned int); // for every 2 bits from LSB to MSB: // block-wise radix sort (write blocks back to global memory) for (unsigned int shift_width = 0; shift_width <= 30; shift_width += 2) { gpu_radix_sort_local<<<grid_sz, block_sz, shmem_sz>>>(d_out, d_prefix_sums, d_block_sums, shift_width, d_in, d_in_len, max_elems_per_block, d_idx_in, d_idx_out); //unsigned int* h_test = new unsigned int[d_in_len]; //checkCudaErrors(cudaMemcpy(h_test, d_in, sizeof(unsigned int) * d_in_len, cudaMemcpyDeviceToHost)); //for (unsigned int i = 0; i < d_in_len; ++i) // std::cout << h_test[i] << " "; //std::cout << std::endl; //delete[] h_test; // scan global block sum array sum_scan_blelloch(d_scan_block_sums, d_block_sums, d_block_sums_len); // scatter/shuffle block-wise sorted array to final positions gpu_glbl_shuffle<<<grid_sz, block_sz>>>(d_in, d_out, d_scan_block_sums, d_prefix_sums, shift_width, d_in_len, max_elems_per_block, d_idx_out, d_idx_in); } checkCudaErrors(cudaMemcpy(d_out, d_in, sizeof(unsigned int) * d_in_len, cudaMemcpyDeviceToDevice)); inverse_float_flip<<<grid_sz, block_sz>>>(d_f_out, d_out, d_in_len); if (d_idx_out != NULL) checkCudaErrors(cudaMemcpy(d_idx_out, d_idx_in, sizeof(unsigned int) * d_in_len, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(d_scan_block_sums)); checkCudaErrors(cudaFree(d_block_sums)); checkCudaErrors(cudaFree(d_prefix_sums)); }
b4c934a1566f5e678fdbb44e87c4cfa780bcf015.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_helper.h" __global__ void scale_kernel(float* ptr, coord_t size, float a, float b) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = (b - a) * ptr[i] + a; } } __global__ void ones_kernel(float* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 1.0f; } } template<typename DT> __global__ void assign_kernel(DT* ptr, coord_t size, DT value) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = value; } } template<typename DT> __global__ void copy_kernel(DT* dst, const DT* src, coord_t size) { CUDA_KERNEL_LOOP(i, size) { dst[i] = src[i]; } } __global__ void reluBackward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = (output[i] > 0.0f) ? grad_ptr[i] : 0; } } __global__ void apply_add(float *data_ptr, const float *replica_ptr, size_t size) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += replica_ptr[i]; } } __global__ void apply_add_with_scale(float *data_ptr, const float *grad_ptr, size_t size, float scale) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += grad_ptr[i] * scale; } } __host__ void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size, int num_replica, float learning_rate) { // Step 1: gater gradients to the first replica for (int i = 1; i < num_replica; i++) { const float *replica = grad_ptr + i * replica_size; hipLaunchKernelGGL(( apply_add), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0, (float*)grad_ptr, replica, replica_size); } // Step 2: scale the first replica float scale_factor = 1.0f / num_replica * (-learning_rate); hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0, para_ptr, grad_ptr, replica_size, scale_factor); } template __global__ void assign_kernel<float>(float* ptr, coord_t size, float value); template __global__ void assign_kernel<int32_t>(int32_t* ptr, coord_t size, int32_t value); template __global__ void assign_kernel<int64_t>(int64_t* 
ptr, coord_t size, int64_t value); template __global__ void copy_kernel<float>(float* dst, const float* src, coord_t size);
b4c934a1566f5e678fdbb44e87c4cfa780bcf015.cu
#include "cuda_helper.h" __global__ void scale_kernel(float* ptr, coord_t size, float a, float b) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = (b - a) * ptr[i] + a; } } __global__ void ones_kernel(float* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 1.0f; } } template<typename DT> __global__ void assign_kernel(DT* ptr, coord_t size, DT value) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = value; } } template<typename DT> __global__ void copy_kernel(DT* dst, const DT* src, coord_t size) { CUDA_KERNEL_LOOP(i, size) { dst[i] = src[i]; } } __global__ void reluBackward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = (output[i] > 0.0f) ? grad_ptr[i] : 0; } } __global__ void apply_add(float *data_ptr, const float *replica_ptr, size_t size) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += replica_ptr[i]; } } __global__ void apply_add_with_scale(float *data_ptr, const float *grad_ptr, size_t size, float scale) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += grad_ptr[i] * scale; } } __host__ void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size, int num_replica, float learning_rate) { // Step 1: gater gradients to the first replica for (int i = 1; i < num_replica; i++) { const float *replica = grad_ptr + i * replica_size; apply_add<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>( (float*)grad_ptr, replica, replica_size); } // Step 2: scale the first replica float scale_factor = 1.0f / num_replica * (-learning_rate); apply_add_with_scale<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>( para_ptr, grad_ptr, replica_size, scale_factor); } template __global__ void assign_kernel<float>(float* ptr, coord_t size, float value); template __global__ void assign_kernel<int32_t>(int32_t* ptr, coord_t size, int32_t value); template __global__ void assign_kernel<int64_t>(int64_t* ptr, coord_t size, int64_t value); template __global__ void copy_kernel<float>(float* dst, const float* src, coord_t size);
4a1206fa699720acb784ad0b6f5e38efd7fd6936.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. 
matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); hipLaunchKernelGGL(( kernel_dump<Element, GmemIterator, SmemIterator>) , dim3(grid), dim3(block), smem_size, 0, params, matrix.device_ref()); hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { std::cout << "Failed" << std::endl; } return (result == hipSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
4a1206fa699720acb784ad0b6f5e38efd7fd6936.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. 
matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); kernel_dump<Element, GmemIterator, SmemIterator> <<<grid, block, smem_size, 0>>>(params, matrix.device_ref()); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cout << "Failed" << std::endl; } return (result == cudaSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
f5f408f2f774d06b8e36fdd56188785d3afe58a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Raster.cuh" #include "TensorflowOp_generated.h" #include <hip/hip_fp16.h> #include "MNNCUDAFunction.cuh" namespace MNN { namespace CUDA { // Blit don't care offset template <typename T> __global__ void blitRegion(const T *inputO, T *outputO, int count, int loopCount, const int32_t* dstIndice, const int32_t* srcIndice, int dstUseIndice, int srcUseIndice, int dstStep, int srcStep,int srcLimit, int sizeZ, int sizeY, int sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX ) { int total = count; for (size_t fuseIndex = blockIdx.x * blockDim.x + threadIdx.x; fuseIndex < total; fuseIndex += blockDim.x * gridDim.x) { int x = fuseIndex % sizeX; int temp = fuseIndex / sizeX; int y = temp % sizeY; temp = temp / sizeY; int z = temp % sizeZ; int i = temp / sizeZ; int srcOffsetO = i * srcStep; if (srcUseIndice >= 0) { srcOffsetO = srcIndice[i] * srcStep; } int dstOffsetO = i * dstStep; if (dstUseIndice >= 0) { dstOffsetO = dstIndice[i] * dstStep; } if (srcOffsetO >= 0 && srcOffsetO < srcLimit) { const T* input = inputO + srcOffsetO; T* output = outputO + dstOffsetO; int srcOffset = z * strideZ + y * strideY + x * strideX; int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; output[dstOffset] = input[srcOffset]; } else { T* output = outputO + dstOffsetO; int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; output[dstOffset] = (T)0; } } } void BlitWithIndice(uint8_t* output, const uint8_t* input, const int32_t* dstIndices, const int32_t* srcIndices, int dstUseIndice, int srcUseIndice, int loopCount, int dstStep, int srcStep, int srcLimit, const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) { int count = loopCount * reg.size[0]*reg.size[1]*reg.size[2]; int block_num = runtime->blocks_num(count); int threads_num = ALIMIN(runtime->threads_num(), count); switch (bytes) { case 4: hipLaunchKernelGGL(( 
blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 2: hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; case 1: hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit, reg.size[0], reg.size[1], reg.size[2], reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); break; default: break; } } #define UNARY_FUNC(Name, Func)\ template<typename T>\ __global__ void Name(const T *input, T *output,\ int count,\ DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\ int strideZ, int strideY, int strideX,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) { \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ sizeX.divmod(i, tmp, ix);\ sizeY.divmod(tmp, iz, iy);\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ T x = input[srcOffset];\ output[dstOffset] = Func;\ }\ }\ template<typename T>\ __global__ void FLOAT##Name(const T *input, T *output,\ int count,\ DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\ int strideZ, int strideY, int strideX,\ int dstStrideZ, int dstStrideY, int dstStrideX\ ) 
{ \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ sizeX.divmod(i, tmp, ix);\ sizeY.divmod(tmp, iz, iy);\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ float x = (float)input[srcOffset];\ output[dstOffset] = (float)(Func);\ }\ }\ template<typename T> __global__ void blit_2_float(const T *input, T *output, int count, DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX, int strideZ, int strideY, int dstStrideZ, int dstStrideY ) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) { int ix, tmp, iy, iz; sizeX.divmod(i, tmp, ix); sizeY.divmod(tmp, iz, iy); int srcOffset = iz * strideZ + iy * strideY + (ix << 1); int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1); int2 * dstF = (int2 *)(output+dstOffset); dstF[0] = ((int2 *)(input+srcOffset))[0]; } } template<typename T> __global__ void blit_2_half(const T *input, T *output, int count, DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX, int strideZ, int strideY, int dstStrideZ, int dstStrideY ) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) { int ix, tmp, iy, iz; sizeX.divmod(i, tmp, ix); sizeY.divmod(tmp, iz, iy); int srcOffset = iz * strideZ + iy * strideY + (ix << 1); int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1); int* dstF = (int *)(output+dstOffset); dstF[0] = ((int *)(input+srcOffset))[0]; } } struct Bytes512 { int4 x[4]; }; UNARY_FUNC(blit, x); UNARY_FUNC(ABS, abs(x)); UNARY_FUNC(EXP, exp(x)); UNARY_FUNC(NEG, -x); UNARY_FUNC(RECIPROCAL, (1.0)/x); UNARY_FUNC(FLOOR, floor(x)); UNARY_FUNC(CEIL, ceil(x)); UNARY_FUNC(SQUARE, x*x); UNARY_FUNC(SQRT, (T)(sqrt((float)x))); UNARY_FUNC(RSQRT, (T)(rsqrt((float)x))); UNARY_FUNC(LOG, (T)(log((float)x))); UNARY_FUNC(SIN, (T)(sin((float)x))); UNARY_FUNC(COS, (T)(cos((float)x))); UNARY_FUNC(TAN, 
(T)(tan((float)x))); UNARY_FUNC(ASIN, (T)(asin((float)x))); UNARY_FUNC(ACOS, (T)(acos((float)x))); UNARY_FUNC(ATAN, (T)(atan((float)x))); UNARY_FUNC(LOG1P, log(1+x)); UNARY_FUNC(TANH, tanh(x)); UNARY_FUNC(SIGMOID, 1./(1.+exp(-x))); UNARY_FUNC(EXPM1, exp(x)-1); UNARY_FUNC(ATANH, atanh(x)); UNARY_FUNC(ACOSH, acosh(x)); UNARY_FUNC(COSH, cosh(x)); UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? -1 : 0)); UNARY_FUNC(ROUND, round(x)); UNARY_FUNC(SINH, sinh(x)); UNARY_FUNC(ASINH, asinh(x)); UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0)); UNARY_FUNC(ERF, erf(x)); UNARY_FUNC(ERFC, erfc(x)); UNARY_FUNC(ERFINV, erfinv(x)); UNARY_FUNC(GELU, (1.0f + tanh(0.79788458f * (0.044715f * x * x * x + x))) * x * 0.5f); UNARY_FUNC(GELU_STANDARD, (erf(x*0.7071067932881648f)+1.f)*x*0.5); void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) { int count = size[0] * size[1] * size[2]; // MNN_PRINT("blit info size:%d-%d-%d, srcStride:%d-%d-%d, dstStride:%d-%d-%d\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); bool isThirdSizeVector = (size[2] % 2 == 0 && srcStride[2] == 1 && dstStride[2] == 1); bool isSecondSizeVector = (size[1] % 2 == 0 && srcStride[1] == 1 && dstStride[1] == 1) && (size[2] == 1 && srcStride[2] == 1 && dstStride[2] == 1); bool isFirstSizeVector = (size[0] % 2 == 0 && srcStride[0] == 1 && dstStride[0] == 1) && (size[1] == 1 && srcStride[1] == 1 && dstStride[1] == 1) && (size[2] == 1 && srcStride[2] == 1 && dstStride[2] == 1); bool isSizeVector = isThirdSizeVector || isSecondSizeVector || isFirstSizeVector; if(count > 16384 && isSizeVector) { int32_t newSize[3], newSrcStride[3], newDstStride[3]; newSize[0] = size[0]; newSize[1] = size[1]; newSize[2] = size[2]; newSrcStride[0] = srcStride[0]; newSrcStride[1] = srcStride[1]; newSrcStride[2] = srcStride[2]; newDstStride[0] = dstStride[0]; newDstStride[1] 
= dstStride[1]; newDstStride[2] = dstStride[2]; if(isSecondSizeVector) { /* size : [size_0, size_1, 1] srcStride : [ss_0, 1, 1] dstStride : [ds_0, 1, 1] --> newSize: [1, size_0, size_1] newSrcStride: [1, ss_0, 1] newDstStride: [1, ds_0, 1] */ newSize[2] = size[1]; newSize[1] = size[0]; newSize[0] = 1; newSrcStride[1] = srcStride[0]; newSrcStride[0] = 1; newDstStride[1] = dstStride[0]; newDstStride[0] = 1; } if(isFirstSizeVector) { /* size : [size_0, 1, 1] srcStride : [1, 1, 1] dstStride : [1, 1, 1] --> newSize: [1, 1, size_0] newSrcStride: [1, 1, 1] newDstStride: [1, 1, 1] */ newSize[2] = size[0]; newSize[0] = 1; } DivModFast new_sz(newSize[0]); DivModFast new_sy(newSize[1]); DivModFast new_sx(newSize[2]/2); int newCount = count / 2; int block_num = runtime->blocks_num(newCount); int threads_num = runtime->threads_num(); // Forbid addresss misalign if(bytes == 4 && reinterpret_cast<std::uintptr_t>(input) % 8 == 0 && reinterpret_cast<std::uintptr_t>(output) % 8 == 0) { hipLaunchKernelGGL(( blit_2_float), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, newCount, new_sz, new_sy, new_sx, newSrcStride[0], newSrcStride[1], newDstStride[0], newDstStride[1]); checkKernelErrors; return; } else if(bytes == 2 && reinterpret_cast<std::uintptr_t>(input) % 4 == 0 && reinterpret_cast<std::uintptr_t>(output) % 4 == 0) { hipLaunchKernelGGL(( blit_2_half), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, newCount, new_sz, new_sy, new_sx, newSrcStride[0], newSrcStride[1], newDstStride[0], newDstStride[1]); checkKernelErrors; return; } } DivModFast sz(size[0]); DivModFast sy(size[1]); DivModFast sx(size[2]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 64: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const Bytes512*)input, (Bytes512*)output, count, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], 
dstStride[2]); break; case 32: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const double4*)input, (double4*)output, count, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 4: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, count, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, count, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, count, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } checkKernelErrors; } template<typename T0, typename T1> __global__ void fuseblit(const T0 *input, T1 *output, int fuseNum, int count, const int32_t* sliceOffset, DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX, int strideZ, int strideY, int strideX, int dstStrideZ, int dstStrideY, int dstStrideX ) { size_t c = blockIdx.x * blockDim.x + threadIdx.x; for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) { int ix, tmp, iy, tmp2, iz, j; sizeX.divmod(c, tmp, ix); sizeY.divmod(tmp, tmp2, iy); sizeZ.divmod(tmp2, j, iz); int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX; int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX; output[dst_offset] = input[src_offset]; } } __global__ void fuseblit_4(const int32_t *input, int32_t *output, int fuseNum, int count, const int32_t* sliceOffset, DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX, int strideZ, int strideY, int dstStrideZ, int dstStrideY ) { for (size_t c = blockIdx.x * 
blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) { int ix, tmp, iy, tmp2, iz, j; sizeX.divmod(c, tmp, ix); sizeY.divmod(tmp, tmp2, iy); sizeZ.divmod(tmp2, j, iz); int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2); int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2); int4* srcF = (int4 *)(input + src_offset); int4* dstF = (int4 *)(output + dst_offset); dstF[0] = srcF[0]; } } __global__ void fuseblit_half_4(const int16_t *input, int16_t *output, int fuseNum, int count, const int32_t* sliceOffset, DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX, int strideZ, int strideY, int dstStrideZ, int dstStrideY ) { for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) { int ix, tmp, iy, tmp2, iz, j; sizeX.divmod(c, tmp, ix); sizeY.divmod(tmp, tmp2, iy); sizeZ.divmod(tmp2, j, iz); int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2); int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2); int2* srcF = (int2 *)(input + src_offset); int2* dstF = (int2 *)(output + dst_offset); dstF[0] = srcF[0]; } } void FuseRasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime, int unit) { DivModFast sz(size[0]); DivModFast sy(size[1]); int count = fuseNum * size[0] * size[1] * size[2]; bool strideC4Support = srcStride[0] % 4 == 0 && srcStride[1] % 4 == 0 && dstStride[0] % 4 == 0 && dstStride[1] % 4 == 0; if(size[2] % 4 == 0 && count > 16384 && srcStride[2] == 1 && dstStride[2] == 1 && unit == 4 && strideC4Support) { int xL4 = size[2] / 4; int countC4 = fuseNum * size[0] * size[1] * xL4; int numBlocks = runtime->blocks_num(countC4); int threadsPerBlock = runtime->threads_num(); DivModFast sx_4(xL4); if(bytes == 4) { hipLaunchKernelGGL(( fuseblit_4), dim3(numBlocks), dim3(threadsPerBlock), 0, 
0, (const int32_t*)input, (int32_t*)output, fuseNum, countC4, (const int32_t*)sliceOffset, sz, sy, sx_4, srcStride[0], srcStride[1], dstStride[0], dstStride[1]); return; } else if(bytes == 2){ hipLaunchKernelGGL(( fuseblit_half_4), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const int16_t*)input, (int16_t*)output, fuseNum, countC4, (const int32_t*)sliceOffset, sz, sy, sx_4, srcStride[0], srcStride[1], dstStride[0], dstStride[1]); return; } } DivModFast sx(size[2]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); switch (bytes) { case 64: hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const Bytes512*)input, (Bytes512*)output, fuseNum, count, (const int32_t*)sliceOffset, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 16: hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int4*)input, (int4*)output, fuseNum, count, (const int32_t*)sliceOffset, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 4: hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, fuseNum, count, (const int32_t*)sliceOffset, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 2: hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output, fuseNum, count, (const int32_t*)sliceOffset, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; case 1: hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, fuseNum, count, (const int32_t*)sliceOffset, sz, sy, sx, srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]); break; default: break; } //printf("%s, %d-%d-%d-%d\n", 
hipGetErrorString(hipGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y); } template<typename T0, typename T1> __global__ void fuseblitLimit(const T0 *input, T1 *output, const FuseRegion* info, const int32_t* sliceOffset ) { int sizeZ = info->size[0]; int sizeY = info->size[1]; int sizeX = info->size[2]; int strideZ = info->srcStride[0]; int strideY = info->srcStride[1]; int strideX = info->srcStride[2]; int dstStrideZ = info->dstStride[0]; int dstStrideY = info->dstStride[1]; int dstStrideX = info->dstStride[2]; int fuseNum = info->fuseNumber; int count = fuseNum*sizeZ * sizeY * sizeX; for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) { int j = c / (sizeZ * sizeY * sizeX); int i = c % (sizeZ * sizeY * sizeX); int ix = i % sizeX; int tmp = i / sizeX; int iy = tmp % sizeY; int iz = tmp / sizeY; const int* srcOffsetPtr = sliceOffset + 8 * j; const int* dstOffsetPtr = sliceOffset + 8 * j + 4; T0 srcValue = (T0)0; int src_offset = srcOffsetPtr[3] + iz * strideZ + iy * strideY + ix * strideX; if (srcOffsetPtr[0] > iz && srcOffsetPtr[1] > iy && srcOffsetPtr[2] > ix) { srcValue = input[src_offset]; } int dst_offset = dstOffsetPtr[3] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX; //printf("%d -> %d - %f\n", src_offset, dst_offset, srcValue); if (dstOffsetPtr[0] > iz && dstOffsetPtr[1] > iy && dstOffsetPtr[2] > ix) { output[dst_offset] = srcValue; } } } void FuseRasterBlitFloatToHalf(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) { auto& prop = runtime->prop(); int threads_num = prop.maxThreadsPerBlock; int block_num = prop.multiProcessorCount; hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (half*)output, info, (const int32_t*)sliceOffset); } void FuseRasterBlitHalfToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) { auto& 
prop = runtime->prop(); int threads_num = prop.maxThreadsPerBlock; int block_num = prop.multiProcessorCount; hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (float*)output, info, (const int32_t*)sliceOffset); } void FuseRasterBlitFloatToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) { auto& prop = runtime->prop(); int threads_num = prop.maxThreadsPerBlock; int block_num = prop.multiProcessorCount; hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, info, (const int32_t*)sliceOffset); } void FuseRasterBlitCommon(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime, int bytes) { auto& prop = runtime->prop(); int threads_num = prop.maxThreadsPerBlock; int block_num = prop.multiProcessorCount; switch (bytes) { case 4: hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, info, (const int32_t*)sliceOffset); break; case 2: hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, info, (const int32_t*)sliceOffset); break; case 1: hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, info, (const int32_t*)sliceOffset); break; default: break; } } void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); DivModFast sz(size[0]); DivModFast sy(size[1]); DivModFast sx(size[2]); // TODO: Support FP16 #define COMPUTE(TYPE)\ if (opType == MNN::UnaryOpOperation_##TYPE ) {\ if(bytes==2) {\ hipLaunchKernelGGL(( FLOAT##TYPE), dim3(block_num), 
dim3(threads_num), 0, 0, (const half*)input, (half*)output,\ count, \ sz, sy, sx,\ srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ } else {\ hipLaunchKernelGGL(( TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,\ count, \ sz, sy, sx,\ srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ }\ return;\ }\ COMPUTE(ABS); COMPUTE(NEG); COMPUTE(FLOOR); COMPUTE(CEIL); COMPUTE(SQUARE); COMPUTE(SQRT); COMPUTE(RSQRT); COMPUTE(EXP); COMPUTE(LOG); COMPUTE(SIN); COMPUTE(COS); COMPUTE(TAN); COMPUTE(GELU); COMPUTE(GELU_STANDARD); COMPUTE(ASIN); COMPUTE(ACOS); COMPUTE(ATAN); COMPUTE(RECIPROCAL); COMPUTE(LOG1P); COMPUTE(TANH); COMPUTE(SIGMOID); COMPUTE(EXPM1); COMPUTE(ACOSH); COMPUTE(ATANH); COMPUTE(SIGN); COMPUTE(COSH); COMPUTE(ROUND); COMPUTE(SINH); COMPUTE(ASINH); COMPUTE(HARDSWISH); COMPUTE(ERF); COMPUTE(ERFC); COMPUTE(ERFINV); #undef COMPUTE } #define BINARY_FUNC(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void Binary##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX, int activationType\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ TIn x = input0[srcOffset];\ TIn y = input1[srcOffset1];\ TOut val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? 
(TOut)0 : val);\ }\ output[dstOffset] = val;\ }\ }\ #define BINARY_FUNC_FLOATMID(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void BinaryMid##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ float x = input0[srcOffset];\ float y = input1[srcOffset1];\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMid4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY,\ int strideZ1, int strideY1,\ int dstStrideZ, int dstStrideY, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ ix = ix << 2;\ int srcOffset = iz * strideZ + iy * strideY + ix;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix;\ float4 xx = inp0Broadcast ? make_float4(input0[srcOffset-ix],input0[srcOffset-ix], input0[srcOffset-ix], input0[srcOffset-ix]) : ((float4 *)(input0+srcOffset))[0];\ float4 yy = inp1Broadcast ? 
make_float4(input1[srcOffset1-ix],input1[srcOffset1-ix], input1[srcOffset1-ix], input1[srcOffset1-ix]) :((float4 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+1] = val;\ x = xx.z;\ y = yy.z;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+2] = val;\ x = xx.w;\ y = yy.w;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+3] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidHalf2_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY,\ int strideZ1, int strideY1,\ int dstStrideZ, int dstStrideY, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ ix = ix << 1;\ int srcOffset = iz * strideZ + iy * strideY + ix;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix;\ half2 xx = inp0Broadcast ? make_half2(input0[srcOffset-ix], input0[srcOffset-ix]) : ((half2 *)(input0+srcOffset))[0];\ half2 yy = inp1Broadcast ? make_half2(input1[srcOffset1-ix], input1[srcOffset1-ix]) : ((half2 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 
0.0f : val);\ }\ output[dstOffset+1] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinear##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ,\ int strideZ,\ int strideZ1,\ int dstStrideZ,\ int activationType\ ) { \ int count = sizeZ;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz * strideZ;\ int srcOffset1 = iz * strideZ1;\ int dstOffset = iz * dstStrideZ;\ float x = input0[srcOffset];\ float y = input1[srcOffset1];\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = (TOut)val;\ }\ }\ #define BINARY_FUNC_FLOATMID4(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinear4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int count_4, int activationType,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz << 2;\ int srcOffset1 = iz << 2;\ int dstOffset = iz << 2;\ float4 xx = inp0Broadcast ? make_float4(input0[0], input0[0], input0[0], input0[0]) : ((float4 *)(input0+srcOffset))[0];\ float4 yy = inp1Broadcast ? make_float4(input1[0], input1[0], input1[0], input1[0]) : ((float4 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ TOut val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset+1] = val;\ x = xx.z;\ y = yy.z;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset+2] = val;\ x = xx.w;\ y = yy.w;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? 
(TOut)0 : val);\ }\ output[dstOffset+3] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinearHalf4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int count_4, int activationType,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz << 2;\ int srcOffset1 = iz << 2;\ int dstOffset = iz << 2;\ half2 xx = inp0Broadcast ? make_half2(input0[0], input0[0]) : ((half2 *)(input0+srcOffset))[0];\ half2 yy = inp1Broadcast ? make_half2(input1[0], input1[0]) : ((half2 *)(input1+srcOffset1))[0];\ float x = (float)xx.x;\ float y = (float)yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = (TOut)val;\ x = (float)xx.y;\ y = (float)yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+1] = (TOut)val;\ xx = inp0Broadcast ? make_half2(input0[0], input0[0]) : ((half2 *)(input0+srcOffset))[1];\ yy = inp1Broadcast ? make_half2(input1[0], input1[0]) : ((half2 *)(input1+srcOffset1))[1];\ x = (float)xx.x;\ y = (float)yy.x;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+2] = (TOut)val;\ x = (float)xx.y;\ y = (float)yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+3] = (TOut)val;\ }\ }\ #define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? -1 : 0)) BINARY_FUNC(ADD, x+y); BINARY_FUNC(SUB, x-y); BINARY_FUNC(MUL, x*y); BINARY_FUNC(DIV, x/y); BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC(MINIMUM, min(x, y)); BINARY_FUNC(MAXIMUM, max(x, y)); BINARY_FUNC(GREATER, x > y ? 1 : 0); BINARY_FUNC(LESS, x < y ? 1 : 0); BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC(EQUAL, x == y ? 1 : 0); BINARY_FUNC(NOTEQUAL, x != y ? 
1 : 0); BINARY_FUNC(FLOORDIV, floor(x / y)); BINARY_FUNC(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC(POW, pow(x, y)); BINARY_FUNC(ATAN2, atan2(x, y)); BINARY_FUNC(MOD, (x % y)); BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0); BINARY_FUNC_FLOATMID(ADD, x+y); BINARY_FUNC_FLOATMID(SUB, x-y); BINARY_FUNC_FLOATMID(MUL, x*y); BINARY_FUNC_FLOATMID(DIV, x/y); BINARY_FUNC_FLOATMID(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC_FLOATMID(MINIMUM, min(x, y)); BINARY_FUNC_FLOATMID(MAXIMUM, max(x, y)); BINARY_FUNC_FLOATMID(GREATER, x > y ? 1 : 0); BINARY_FUNC_FLOATMID(LESS, x < y ? 1 : 0); BINARY_FUNC_FLOATMID(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC_FLOATMID(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC_FLOATMID(EQUAL, x == y ? 1 : 0); BINARY_FUNC_FLOATMID(NOTEQUAL, x != y ? 1 : 0); BINARY_FUNC_FLOATMID(FLOORDIV, floor(x / y)); BINARY_FUNC_FLOATMID(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC_FLOATMID(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC_FLOATMID(POW, pow(x, y)); BINARY_FUNC_FLOATMID(ATAN2, atan2(x, y)); BINARY_FUNC_FLOATMID(MOD, fmod(x, y)); BINARY_FUNC_FLOATMID(LOGICALOR, (x || y) ? 1 : 0); BINARY_FUNC_FLOATMID4(ADD, x+y); BINARY_FUNC_FLOATMID4(SUB, x-y); BINARY_FUNC_FLOATMID4(MUL, x*y); BINARY_FUNC_FLOATMID4(DIV, x/y); BINARY_FUNC_FLOATMID4(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC_FLOATMID4(MINIMUM, min(x, y)); BINARY_FUNC_FLOATMID4(MAXIMUM, max(x, y)); BINARY_FUNC_FLOATMID4(GREATER, x > y ? 1 : 0); BINARY_FUNC_FLOATMID4(LESS, x < y ? 1 : 0); BINARY_FUNC_FLOATMID4(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC_FLOATMID4(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC_FLOATMID4(EQUAL, x == y ? 1 : 0); BINARY_FUNC_FLOATMID4(NOTEQUAL, x != y ? 
1 : 0); BINARY_FUNC_FLOATMID4(FLOORDIV, floor(x / y)); BINARY_FUNC_FLOATMID4(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC_FLOATMID4(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC_FLOATMID4(POW, pow(x, y)); BINARY_FUNC_FLOATMID4(ATAN2, atan2(x, y)); BINARY_FUNC_FLOATMID4(MOD, fmod(x, y)); BINARY_FUNC_FLOATMID4(LOGICALOR, (x || y) ? 1 : 0); template<typename T> void BinaryBlitTemplateFloat(T* output, const T* input, const T* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType, int activationType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // MNN_PRINT("binary :%d %d %d, %d %d %d, %d %d %d, %d %d %d, \n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], srcStride1[0], srcStride1[1], srcStride1[2], dstStride[0], dstStride[1], dstStride[2]); #define COMPUTE_FLOAT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ if (size[2] == count) {\ if(count % 4 == 0 && count > 16384 && (srcStride[2] == 0 || srcStride[2] == 1) && (srcStride1[2] == 0 || srcStride1[2] == 1) && dstStride[2] == 1) {\ block_num = runtime->blocks_num(count/4);\ threads_num = runtime->threads_num();\ bool srcBroadcast = srcStride[2] == 0;\ bool srcBroadcast1 = srcStride1[2] == 0;\ if(bytes == 4) {\ hipLaunchKernelGGL(( BinaryMidLinear4_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ count/4, activationType, srcBroadcast, srcBroadcast1);\ } else {\ hipLaunchKernelGGL(( BinaryMidLinearHalf4_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ count/4, activationType, srcBroadcast, srcBroadcast1);\ }\ } else {\ hipLaunchKernelGGL(( BinaryMidLinear##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ size[2],\ srcStride[2],\ srcStride1[2],\ 
dstStride[2],\ activationType);\ }\ } else {\ bool isVectorSizeZ = (size[0] == 1 || ((srcStride[2] == 0 || srcStride[0] % bytes == 0) && (srcStride1[2] == 0 || srcStride1[0] % bytes == 0) && dstStride[0] % bytes == 0));\ bool isVectorSizeY = (size[1] == 1 || ((srcStride[2] == 0 || srcStride[1] % bytes == 0) && (srcStride1[2] == 0 || srcStride1[1] % bytes == 0) && dstStride[1] % bytes == 0));\ bool isVector4 = size[2] % bytes == 0 && isVectorSizeZ && isVectorSizeY;\ if(isVector4 && count > 16384 && (srcStride[2] == 0 || srcStride[2] == 1) && (srcStride1[2] == 0 || srcStride1[2] == 1) && dstStride[2] == 1) {\ block_num = runtime->blocks_num(count/bytes);\ threads_num = runtime->threads_num();\ DivModFast sy(size[1]);\ DivModFast sx(size[2]/bytes);\ bool srcBroadcast = srcStride[2] == 0;\ bool srcBroadcast1 = srcStride1[2] == 0;\ if(bytes == 4) {\ hipLaunchKernelGGL(( BinaryMid4_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2]/4,\ srcStride[0], srcStride[1],\ srcStride1[0], srcStride1[1],\ dstStride[0], dstStride[1], activationType, sy, sx, srcBroadcast, srcBroadcast1);\ } else {\ hipLaunchKernelGGL(( BinaryMidHalf2_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2]/2,\ srcStride[0], srcStride[1],\ srcStride1[0], srcStride1[1],\ dstStride[0], dstStride[1], activationType, sy, sx, srcBroadcast, srcBroadcast1);\ }\ } else {\ DivModFast sy(size[1]);\ DivModFast sx(size[2]);\ hipLaunchKernelGGL(( BinaryMid##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2], activationType, sy, sx);\ }\ }\ return;\ }\ COMPUTE_FLOAT(ADD, T); COMPUTE_FLOAT(SUB, T); COMPUTE_FLOAT(MUL, T); COMPUTE_FLOAT(DIV, T); 
COMPUTE_FLOAT(REALDIV, T); COMPUTE_FLOAT(MINIMUM, T); COMPUTE_FLOAT(MAXIMUM, T); COMPUTE_FLOAT(GREATER, int); COMPUTE_FLOAT(LESS, int); COMPUTE_FLOAT(LESS_EQUAL, int); COMPUTE_FLOAT(GREATER_EQUAL, int); COMPUTE_FLOAT(EQUAL, int); COMPUTE_FLOAT(NOTEQUAL, int); COMPUTE_FLOAT(FLOORDIV, T); COMPUTE_FLOAT(FLOORMOD, T); COMPUTE_FLOAT(POW, T); COMPUTE_FLOAT(SquaredDifference, T); COMPUTE_FLOAT(ATAN2, T); COMPUTE_FLOAT(MOD, T); #undef COMPUTE_FLOAT } void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType, int activationType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); #define COMPUTE_INT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ hipLaunchKernelGGL(( Binary##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const int*)input, (const int*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2], activationType);\ return;\ }\ COMPUTE_INT(ADD, int); COMPUTE_INT(SUB, int); COMPUTE_INT(MUL, int); COMPUTE_INT(DIV, int); COMPUTE_INT(MINIMUM, int); COMPUTE_INT(MAXIMUM, int); COMPUTE_INT(GREATER, int); COMPUTE_INT(LESS, int); COMPUTE_INT(LESS_EQUAL, int); COMPUTE_INT(GREATER_EQUAL, int); COMPUTE_INT(EQUAL, int); COMPUTE_INT(NOTEQUAL, int); COMPUTE_INT(SquaredDifference, int); COMPUTE_INT(MOD, int); COMPUTE_INT(LOGICALOR, int); } void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType, int activationType) { if (type.code == halide_type_float) { if (type.bits == 32) { BinaryBlitTemplateFloat((float*)output, 
(float*)input, (float*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } else if (type.bits == 16) { BinaryBlitTemplateFloat((half*)output, (half*)input, (half*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } } else if (type.code == halide_type_int) { BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } } }// namespace CUDA }// namespace MNN
f5f408f2f774d06b8e36fdd56188785d3afe58a1.cu
#include "Raster.cuh"
#include "TensorflowOp_generated.h"
#include <cuda_fp16.h>
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {

// Region blit that ignores the Region's base offsets: per-loop src/dst base
// offsets are computed (or looked up via indice tables) inside the kernel.
// Launch layout: 1-D grid-stride loop over count = loop * sizeZ*sizeY*sizeX
// fused elements. A source base offset falling outside [0, srcLimit) makes the
// destination element zero-filled instead of copied.
// NOTE(review): the `loopCount` parameter is not read inside this kernel body —
// the loop index `i` is recovered from the flattened index instead.
template <typename T>
__global__ void blitRegion(const T *inputO, T *outputO,
    int count, int loopCount,
    const int32_t* dstIndice, const int32_t* srcIndice,
    int dstUseIndice, int srcUseIndice,
    int dstStep, int srcStep, int srcLimit,
    int sizeZ, int sizeY, int sizeX,
    int strideZ, int strideY, int strideX,
    int dstStrideZ, int dstStrideY, int dstStrideX
    ) {
    int total = count;
    for (size_t fuseIndex = blockIdx.x * blockDim.x + threadIdx.x; fuseIndex < total; fuseIndex += blockDim.x * gridDim.x) {
        // Decode flattened index -> (x, y, z) inside one region plus loop index i.
        int x = fuseIndex % sizeX;
        int temp = fuseIndex / sizeX;
        int y = temp % sizeY;
        temp = temp / sizeY;
        int z = temp % sizeZ;
        int i = temp / sizeZ;
        // Source base: either i * srcStep, or an indirect lookup when srcUseIndice >= 0.
        int srcOffsetO = i * srcStep;
        if (srcUseIndice >= 0) {
            srcOffsetO = srcIndice[i] * srcStep;
        }
        // Destination base: same scheme, driven by dstUseIndice.
        int dstOffsetO = i * dstStep;
        if (dstUseIndice >= 0) {
            dstOffsetO = dstIndice[i] * dstStep;
        }
        if (srcOffsetO >= 0 && srcOffsetO < srcLimit) {
            const T* input = inputO + srcOffsetO;
            T* output = outputO + dstOffsetO;
            int srcOffset = z * strideZ + y * strideY + x * strideX;
            int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
            output[dstOffset] = input[srcOffset];
        } else {
            // Out-of-range source: write zero so the destination is fully defined.
            T* output = outputO + dstOffsetO;
            int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
            output[dstOffset] = (T)0;
        }
    }
}

// Host dispatcher for blitRegion. Selects the element type by byte size
// (4 -> float, 2 -> int16_t, 1 -> int8_t; other sizes are silently ignored).
// NOTE(review): no cudaGetLastError()/checkKernelErrors after the launch here,
// unlike RasterBlit below — consider adding one.
void BlitWithIndice(uint8_t* output, const uint8_t* input,
    const int32_t* dstIndices, const int32_t* srcIndices,
    int dstUseIndice, int srcUseIndice,
    int loopCount, int dstStep, int srcStep, int srcLimit,
    const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) {
    int count = loopCount * reg.size[0]*reg.size[1]*reg.size[2];
    int block_num = runtime->blocks_num(count);
    int threads_num = ALIMIN(runtime->threads_num(), count);
    switch (bytes) {
        case 4:
            blitRegion<<<block_num, threads_num>>>((const float*)input, (float*)output,
                count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit,
                reg.size[0], reg.size[1], reg.size[2],
                reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
                reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
            break;
        case 2:
            blitRegion<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
                count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit,
                reg.size[0], reg.size[1], reg.size[2],
                reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
                reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
            break;
        case 1:
            blitRegion<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
                count, loopCount, dstIndices, srcIndices, dstUseIndice, srcUseIndice, dstStep, srcStep, srcLimit,
                reg.size[0], reg.size[1], reg.size[2],
                reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
                reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
            break;
        default:
            break;
    }
}

// UNARY_FUNC(Name, Func) stamps out two strided elementwise kernels:
//   Name<T>       — computes Func on T directly;
//   FLOATName<T>  — loads to float, computes Func in float, stores back
//                   (used by UnaryBlit for 2-byte/half data).
// Both use a 1-D grid-stride loop and DivModFast to decode (iz, iy, ix).
#define UNARY_FUNC(Name, Func)\
template<typename T>\
__global__ void Name(const T *input, T *output,\
    int count,\
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
    int strideZ, int strideY, int strideX,\
    int dstStrideZ, int dstStrideY, int dstStrideX\
    ) { \
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
    int ix, tmp, iy, iz;\
    sizeX.divmod(i, tmp, ix);\
    sizeY.divmod(tmp, iz, iy);\
    int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
    int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
    T x = input[srcOffset];\
    output[dstOffset] = Func;\
  }\
}\
template<typename T>\
__global__ void FLOAT##Name(const T *input, T *output,\
    int count,\
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
    int strideZ, int strideY, int strideX,\
    int dstStrideZ, int dstStrideY, int dstStrideX\
    ) { \
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
    int ix, tmp, iy, iz;\
    sizeX.divmod(i, tmp, ix);\
    sizeY.divmod(tmp, iz, iy);\
    int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
    int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
    float x = (float)input[srcOffset];\
    output[dstOffset] = (float)(Func);\
  }\
}\

// Vectorized copy: moves 2 adjacent 4-byte elements per iteration through one
// int2 (8-byte) load/store; the innermost stride must be 1 and addresses must
// be 8-byte aligned (callers are expected to guarantee this — see RasterBlit).
template<typename T>
__global__ void blit_2_float(const T *input, T *output,
    int count,
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
    int strideZ, int strideY,
    int dstStrideZ, int dstStrideY
    ) {
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
        int ix, tmp, iy, iz;
        sizeX.divmod(i, tmp, ix);
        sizeY.divmod(tmp, iz, iy);
        int srcOffset = iz * strideZ + iy * strideY + (ix << 1);
        int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1);
        int2 * dstF = (int2 *)(output+dstOffset);
        dstF[0] = ((int2 *)(input+srcOffset))[0];
    }
}

// Same idea for 2-byte elements: 2 adjacent halves moved as a single 4-byte int.
template<typename T>
__global__ void blit_2_half(const T *input, T *output,
    int count,
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
    int strideZ, int strideY,
    int dstStrideZ, int dstStrideY
    ) {
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
        int ix, tmp, iy, iz;
        sizeX.divmod(i, tmp, ix);
        sizeY.divmod(tmp, iz, iy);
        int srcOffset = iz * strideZ + iy * strideY + (ix << 1);
        int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1);
        int* dstF = (int *)(output+dstOffset);
        dstF[0] = ((int *)(input+srcOffset))[0];
    }
}

// 64-byte POD so a 64-byte element can be blitted with a single assignment.
struct Bytes512 {
    int4 x[4];
};

// Unary kernel instantiations. Identity copy first, then math ops.
// NOTE(review): RECIPROCAL and LOG1P use double literals/ops ((1.0)/x, log(1+x)),
// which promotes the computation to double for T=float — confirm intended.
UNARY_FUNC(blit, x);
UNARY_FUNC(ABS, abs(x));
UNARY_FUNC(EXP, exp(x));
UNARY_FUNC(NEG, -x);
UNARY_FUNC(RECIPROCAL, (1.0)/x);
UNARY_FUNC(FLOOR, floor(x));
UNARY_FUNC(CEIL, ceil(x));
UNARY_FUNC(SQUARE, x*x);
UNARY_FUNC(SQRT, (T)(sqrt((float)x)));
UNARY_FUNC(RSQRT, (T)(rsqrt((float)x)));
UNARY_FUNC(LOG, (T)(log((float)x)));
UNARY_FUNC(SIN, (T)(sin((float)x)));
UNARY_FUNC(COS, (T)(cos((float)x)));
UNARY_FUNC(TAN, (T)(tan((float)x)));
UNARY_FUNC(ASIN, (T)(asin((float)x)));
UNARY_FUNC(ACOS, (T)(acos((float)x)));
UNARY_FUNC(ATAN, (T)(atan((float)x)));
UNARY_FUNC(LOG1P, log(1+x));
UNARY_FUNC(TANH, tanh(x));
// Remaining unary kernel instantiations (see UNARY_FUNC above; each line stamps
// out both the T-typed and the float-intermediate FLOAT-prefixed kernel).
UNARY_FUNC(SIGMOID, 1./(1.+exp(-x)));
UNARY_FUNC(EXPM1, exp(x)-1);
UNARY_FUNC(ATANH, atanh(x));
UNARY_FUNC(ACOSH, acosh(x));
UNARY_FUNC(COSH, cosh(x));
UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? -1 : 0));
UNARY_FUNC(ROUND, round(x));
UNARY_FUNC(SINH, sinh(x));
UNARY_FUNC(ASINH, asinh(x));
UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0));
UNARY_FUNC(ERF, erf(x));
UNARY_FUNC(ERFC, erfc(x));
UNARY_FUNC(ERFINV, erfinv(x));
UNARY_FUNC(GELU, (1.0f + tanh(0.79788458f * (0.044715f * x * x * x + x))) * x * 0.5f);
UNARY_FUNC(GELU_STANDARD, (erf(x*0.7071067932881648f)+1.f)*x*0.5);

// Strided copy of a size[0] x size[1] x size[2] region.
// Fast path: when the region is large (count > 16384) and one dimension is a
// contiguous even run, the size/stride triples are rotated so that run becomes
// the innermost axis and a 2-elements-per-thread vectorized kernel is used
// (blit_2_float for 4-byte, blit_2_half for 2-byte elements), provided the
// base pointers are sufficiently aligned for the wide load/store.
// Fallback: generic strided blit chosen by element byte size (64/32/4/2/1).
void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) {
    int count = size[0] * size[1] * size[2];
    // MNN_PRINT("blit info size:%d-%d-%d, srcStride:%d-%d-%d, dstStride:%d-%d-%d\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]);
    bool isThirdSizeVector = (size[2] % 2 == 0 && srcStride[2] == 1 && dstStride[2] == 1);
    bool isSecondSizeVector = (size[1] % 2 == 0 && srcStride[1] == 1 && dstStride[1] == 1) && (size[2] == 1 && srcStride[2] == 1 && dstStride[2] == 1);
    bool isFirstSizeVector = (size[0] % 2 == 0 && srcStride[0] == 1 && dstStride[0] == 1) && (size[1] == 1 && srcStride[1] == 1 && dstStride[1] == 1) && (size[2] == 1 && srcStride[2] == 1 && dstStride[2] == 1);
    bool isSizeVector = isThirdSizeVector || isSecondSizeVector || isFirstSizeVector;
    if(count > 16384 && isSizeVector) {
        int32_t newSize[3], newSrcStride[3], newDstStride[3];
        newSize[0] = size[0];
        newSize[1] = size[1];
        newSize[2] = size[2];
        newSrcStride[0] = srcStride[0];
        newSrcStride[1] = srcStride[1];
        newSrcStride[2] = srcStride[2];
        newDstStride[0] = dstStride[0];
        newDstStride[1] = dstStride[1];
        newDstStride[2] = dstStride[2];
        if(isSecondSizeVector) {
            /*
            size : [size_0, size_1, 1], srcStride : [ss_0, 1, 1], dstStride : [ds_0, 1, 1]
            --> newSize: [1, size_0, size_1], newSrcStride: [1, ss_0, 1], newDstStride: [1, ds_0, 1]
            */
            newSize[2] = size[1];
            newSize[1] = size[0];
            newSize[0] = 1;
            newSrcStride[1] = srcStride[0];
            newSrcStride[0] = 1;
            newDstStride[1] = dstStride[0];
            newDstStride[0] = 1;
        }
        if(isFirstSizeVector) {
            /*
            size : [size_0, 1, 1], srcStride : [1, 1, 1], dstStride : [1, 1, 1]
            --> newSize: [1, 1, size_0], newSrcStride: [1, 1, 1], newDstStride: [1, 1, 1]
            */
            newSize[2] = size[0];
            newSize[0] = 1;
        }
        // Two elements handled per loop iteration, hence the halved count.
        DivModFast new_sz(newSize[0]);
        DivModFast new_sy(newSize[1]);
        DivModFast new_sx(newSize[2]/2);
        int newCount = count / 2;
        int block_num = runtime->blocks_num(newCount);
        int threads_num = runtime->threads_num();
        // Forbid address misalignment: the wide load/store needs 8-byte (float2)
        // or 4-byte (half2) aligned base pointers.
        if(bytes == 4 && reinterpret_cast<std::uintptr_t>(input) % 8 == 0 && reinterpret_cast<std::uintptr_t>(output) % 8 == 0) {
            blit_2_float<<<block_num, threads_num>>>((const float*)input, (float*)output, newCount, new_sz, new_sy, new_sx, newSrcStride[0], newSrcStride[1], newDstStride[0], newDstStride[1]);
            checkKernelErrors;
            return;
        } else if(bytes == 2 && reinterpret_cast<std::uintptr_t>(input) % 4 == 0 && reinterpret_cast<std::uintptr_t>(output) % 4 == 0) {
            blit_2_half<<<block_num, threads_num>>>((const half*)input, (half*)output, newCount, new_sz, new_sy, new_sx, newSrcStride[0], newSrcStride[1], newDstStride[0], newDstStride[1]);
            checkKernelErrors;
            return;
        }
    }
    // Generic path: one element per loop iteration, type picked by byte size.
    DivModFast sz(size[0]);
    DivModFast sy(size[1]);
    DivModFast sx(size[2]);
    int block_num = runtime->blocks_num(count);
    int threads_num = runtime->threads_num();
    switch (bytes) {
        case 64:
            blit<<<block_num, threads_num>>>((const Bytes512*)input, (Bytes512*)output,
                count, sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 32:
            blit<<<block_num, threads_num>>>((const double4*)input, (double4*)output,
                count, sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 4:
            blit<<<block_num, threads_num>>>((const float*)input, (float*)output,
                count, sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 2:
            blit<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
                count, sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 1:
            blit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
                count, sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        default:
            break;
    }
    checkKernelErrors;
}

// Fused blit: one launch covers fuseNum regions. Region j's source base offset
// is sliceOffset[j] and its destination base offset is sliceOffset[fuseNum + j].
// 1-D grid-stride loop over count = fuseNum * sizeZ * sizeY * sizeX elements.
// Fix: removed a dead `size_t c = blockIdx.x * blockDim.x + threadIdx.x;` that
// was declared before the loop and immediately shadowed by the identically
// initialized loop variable (no behavior change).
template<typename T0, typename T1>
__global__ void fuseblit(const T0 *input, T1 *output,
    int fuseNum, int count, const int32_t* sliceOffset,
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
    int strideZ, int strideY, int strideX,
    int dstStrideZ, int dstStrideY, int dstStrideX
    ) {
    for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
        int ix, tmp, iy, tmp2, iz, j;
        sizeX.divmod(c, tmp, ix);
        sizeY.divmod(tmp, tmp2, iy);
        sizeZ.divmod(tmp2, j, iz);
        int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX;
        int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
        output[dst_offset] = input[src_offset];
    }
}

// Vectorized fused blit for 4-byte elements: each iteration moves 4 adjacent
// elements via one int4 (16-byte) load/store; sizeX counts groups of 4 and the
// innermost stride must be 1 (guaranteed by the caller's dispatch conditions).
__global__ void fuseblit_4(const int32_t *input, int32_t *output,
    int fuseNum, int count, const int32_t* sliceOffset,
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
    int strideZ, int strideY,
    int dstStrideZ, int dstStrideY
    ) {
    for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
        int ix, tmp, iy, tmp2, iz, j;
        sizeX.divmod(c, tmp, ix);
        sizeY.divmod(tmp, tmp2, iy);
        sizeZ.divmod(tmp2, j, iz);
        int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
        int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
        int4* srcF = (int4 *)(input + src_offset);
        int4* dstF = (int4 *)(output + dst_offset);
        dstF[0] = srcF[0];
    }
}
// Vectorized fused blit for 2-byte elements: each iteration moves 4 adjacent
// halves via one int2 (8-byte) load/store; sizeX counts groups of 4, so
// (ix << 2) recovers the element index. Innermost stride must be 1.
__global__ void fuseblit_half_4(const int16_t *input, int16_t *output,
    int fuseNum, int count, const int32_t* sliceOffset,
    DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
    int strideZ, int strideY,
    int dstStrideZ, int dstStrideY
    ) {
    for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
        int ix, tmp, iy, tmp2, iz, j;
        sizeX.divmod(c, tmp, ix);
        sizeY.divmod(tmp, tmp2, iy);
        sizeZ.divmod(tmp2, j, iz);
        int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
        int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
        int2* srcF = (int2 *)(input + src_offset);
        int2* dstF = (int2 *)(output + dst_offset);
        dstF[0] = srcF[0];
    }
}

// Host dispatcher for the fused blit. Takes the vectorized x4 path when the
// innermost axis is contiguous, divisible by 4, the workload is large
// (count > 16384), unit == 4, and the outer strides are multiples of 4;
// otherwise falls back to the generic fuseblit picked by element byte size.
// NOTE(review): the vectorized early-return paths launch without a subsequent
// error check (the trailing printf diagnostic is commented out).
void FuseRasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime, int unit) {
    DivModFast sz(size[0]);
    DivModFast sy(size[1]);
    int count = fuseNum * size[0] * size[1] * size[2];
    bool strideC4Support = srcStride[0] % 4 == 0 && srcStride[1] % 4 == 0 && dstStride[0] % 4 == 0 && dstStride[1] % 4 == 0;
    if(size[2] % 4 == 0 && count > 16384 && srcStride[2] == 1 && dstStride[2] == 1 && unit == 4 && strideC4Support) {
        // x-axis processed in groups of 4 elements.
        int xL4 = size[2] / 4;
        int countC4 = fuseNum * size[0] * size[1] * xL4;
        int numBlocks = runtime->blocks_num(countC4);
        int threadsPerBlock = runtime->threads_num();
        DivModFast sx_4(xL4);
        if(bytes == 4) {
            fuseblit_4<<<numBlocks, threadsPerBlock>>>((const int32_t*)input, (int32_t*)output,
                fuseNum, countC4, (const int32_t*)sliceOffset,
                sz, sy, sx_4,
                srcStride[0], srcStride[1],
                dstStride[0], dstStride[1]);
            return;
        } else if(bytes == 2){
            fuseblit_half_4<<<numBlocks, threadsPerBlock>>>((const int16_t*)input, (int16_t*)output,
                fuseNum, countC4, (const int32_t*)sliceOffset,
                sz, sy, sx_4,
                srcStride[0], srcStride[1],
                dstStride[0], dstStride[1]);
            return;
        }
    }
    // Generic path: element type chosen by byte size (64/16/4/2/1).
    DivModFast sx(size[2]);
    int block_num = runtime->blocks_num(count);
    int threads_num = runtime->threads_num();
    switch (bytes) {
        case 64:
            fuseblit<<<block_num, threads_num>>>((const Bytes512*)input, (Bytes512*)output,
                fuseNum, count, (const int32_t*)sliceOffset,
                sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 16:
            fuseblit<<<block_num, threads_num>>>((const int4*)input, (int4*)output,
                fuseNum, count, (const int32_t*)sliceOffset,
                sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 4:
            fuseblit<<<block_num, threads_num>>>((const float*)input, (float*)output,
                fuseNum, count, (const int32_t*)sliceOffset,
                sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 2:
            fuseblit<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
                fuseNum, count, (const int32_t*)sliceOffset,
                sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        case 1:
            fuseblit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
                fuseNum, count, (const int32_t*)sliceOffset,
                sz, sy, sx,
                srcStride[0], srcStride[1], srcStride[2],
                dstStride[0], dstStride[1], dstStride[2]);
            break;
        default:
            break;
    }
    //printf("%s, %d-%d-%d-%d\n", cudaGetErrorString(cudaGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y);
}

// Bounds-limited fused blit. Sizes/strides come from a device-readable
// FuseRegion; per-region limits live in sliceOffset as 8-int records per
// region j: ints [8j..8j+3] = src (z/y/x limits, base offset), ints
// [8j+4..8j+7] = dst (z/y/x limits, base offset). Elements outside the source
// limits read as 0; writes outside the destination limits are skipped.
template<typename T0, typename T1>
__global__ void fuseblitLimit(const T0 *input, T1 *output,
    const FuseRegion* info, const int32_t* sliceOffset
    ) {
    int sizeZ = info->size[0];
    int sizeY = info->size[1];
    int sizeX = info->size[2];
    int strideZ = info->srcStride[0];
    int strideY = info->srcStride[1];
    int strideX = info->srcStride[2];
    int dstStrideZ = info->dstStride[0];
    int dstStrideY = info->dstStride[1];
    int dstStrideX = info->dstStride[2];
    int fuseNum = info->fuseNumber;
    int count = fuseNum*sizeZ * sizeY * sizeX;
    for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) {
        int j = c / (sizeZ * sizeY * sizeX);
        int i = c % (sizeZ * sizeY * sizeX);
        int ix = i % sizeX;
        int tmp = i / sizeX;
        int iy = tmp % sizeY;
        int iz = tmp / sizeY;
        const int* srcOffsetPtr = sliceOffset + 8 * j;
        const int* dstOffsetPtr = sliceOffset + 8 * j + 4;
        T0 srcValue = (T0)0;
        int src_offset = srcOffsetPtr[3] + iz * strideZ + iy * strideY + ix * strideX;
        if (srcOffsetPtr[0] > iz && srcOffsetPtr[1] > iy && srcOffsetPtr[2] > ix) {
            srcValue = input[src_offset];
        }
        int dst_offset = dstOffsetPtr[3] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
        //printf("%d -> %d - %f\n", src_offset, dst_offset, srcValue);
        if (dstOffsetPtr[0] > iz && dstOffsetPtr[1] > iy && dstOffsetPtr[2] > ix) {
            output[dst_offset] = srcValue;
        }
    }
}

// The wrappers below launch fuseblitLimit with a fixed full-occupancy shape:
// one block per SM (multiProcessorCount) of maxThreadsPerBlock threads; the
// kernel's grid-stride loop covers any element count.

// float -> half conversion blit.
void FuseRasterBlitFloatToHalf(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
    auto& prop = runtime->prop();
    int threads_num = prop.maxThreadsPerBlock;
    int block_num = prop.multiProcessorCount;
    fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (half*)output,
        info, (const int32_t*)sliceOffset);
}

// half -> float conversion blit.
void FuseRasterBlitHalfToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
    auto& prop = runtime->prop();
    int threads_num = prop.maxThreadsPerBlock;
    int block_num = prop.multiProcessorCount;
    fuseblitLimit<<<block_num, threads_num>>>((const half*)input, (float*)output,
        info, (const int32_t*)sliceOffset);
}

// float -> float blit.
void FuseRasterBlitFloatToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
    auto& prop = runtime->prop();
    int threads_num = prop.maxThreadsPerBlock;
    int block_num = prop.multiProcessorCount;
    fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (float*)output,
        info, (const int32_t*)sliceOffset);
}

// Same-type blit dispatched by element byte size (body continues below).
void FuseRasterBlitCommon(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime, int bytes) {
    auto& prop =
runtime->prop(); int threads_num = prop.maxThreadsPerBlock; int block_num = prop.multiProcessorCount; switch (bytes) { case 4: fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (float*)output, info, (const int32_t*)sliceOffset); break; case 2: fuseblitLimit<<<block_num, threads_num>>>((const half*)input, (half*)output, info, (const int32_t*)sliceOffset); break; case 1: fuseblitLimit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, info, (const int32_t*)sliceOffset); break; default: break; } } void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); DivModFast sz(size[0]); DivModFast sy(size[1]); DivModFast sx(size[2]); // TODO: Support FP16 #define COMPUTE(TYPE)\ if (opType == MNN::UnaryOpOperation_##TYPE ) {\ if(bytes==2) {\ FLOAT##TYPE<<<block_num, threads_num>>>((const half*)input, (half*)output,\ count, \ sz, sy, sx,\ srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ } else {\ TYPE<<<block_num, threads_num>>>((const float*)input, (float*)output,\ count, \ sz, sy, sx,\ srcStride[0], srcStride[1], srcStride[2],\ dstStride[0], dstStride[1], dstStride[2]);\ }\ return;\ }\ COMPUTE(ABS); COMPUTE(NEG); COMPUTE(FLOOR); COMPUTE(CEIL); COMPUTE(SQUARE); COMPUTE(SQRT); COMPUTE(RSQRT); COMPUTE(EXP); COMPUTE(LOG); COMPUTE(SIN); COMPUTE(COS); COMPUTE(TAN); COMPUTE(GELU); COMPUTE(GELU_STANDARD); COMPUTE(ASIN); COMPUTE(ACOS); COMPUTE(ATAN); COMPUTE(RECIPROCAL); COMPUTE(LOG1P); COMPUTE(TANH); COMPUTE(SIGMOID); COMPUTE(EXPM1); COMPUTE(ACOSH); COMPUTE(ATANH); COMPUTE(SIGN); COMPUTE(COSH); COMPUTE(ROUND); COMPUTE(SINH); COMPUTE(ASINH); COMPUTE(HARDSWISH); COMPUTE(ERF); COMPUTE(ERFC); COMPUTE(ERFINV); #undef COMPUTE } #define BINARY_FUNC(Name, Func)\ template<typename TIn, 
typename TOut>\ __global__ void Binary##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX, int activationType\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix = i % sizeX;\ int tmp = i / sizeX;\ int iy = tmp % sizeY;\ int iz = tmp / sizeY;\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ TIn x = input0[srcOffset];\ TIn y = input1[srcOffset1];\ TOut val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset] = val;\ }\ }\ #define BINARY_FUNC_FLOATMID(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void BinaryMid##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY, int strideX,\ int strideZ1, int strideY1, int strideX1,\ int dstStrideZ, int dstStrideY, int dstStrideX, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\ float x = input0[srcOffset];\ float y = input1[srcOffset1];\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 
0.0f : val);\ }\ output[dstOffset] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMid4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY,\ int strideZ1, int strideY1,\ int dstStrideZ, int dstStrideY, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ ix = ix << 2;\ int srcOffset = iz * strideZ + iy * strideY + ix;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix;\ float4 xx = inp0Broadcast ? make_float4(input0[srcOffset-ix],input0[srcOffset-ix], input0[srcOffset-ix], input0[srcOffset-ix]) : ((float4 *)(input0+srcOffset))[0];\ float4 yy = inp1Broadcast ? make_float4(input1[srcOffset1-ix],input1[srcOffset1-ix], input1[srcOffset1-ix], input1[srcOffset1-ix]) :((float4 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+1] = val;\ x = xx.z;\ y = yy.z;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+2] = val;\ x = xx.w;\ y = yy.w;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 
0.0f : val);\ }\ output[dstOffset+3] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidHalf2_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ, int sizeY, int sizeX,\ int strideZ, int strideY,\ int strideZ1, int strideY1,\ int dstStrideZ, int dstStrideY, int activationType,\ DivModFast d_sizeY, DivModFast d_sizeX,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ int count = sizeZ * sizeY * sizeX;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int ix, tmp, iy, iz;\ d_sizeX.divmod(i, tmp, ix);\ d_sizeY.divmod(tmp, iz, iy);\ ix = ix << 1;\ int srcOffset = iz * strideZ + iy * strideY + ix;\ int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix;\ int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix;\ half2 xx = inp0Broadcast ? make_half2(input0[srcOffset-ix], input0[srcOffset-ix]) : ((half2 *)(input0+srcOffset))[0];\ half2 yy = inp1Broadcast ? make_half2(input1[srcOffset1-ix], input1[srcOffset1-ix]) : ((half2 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+1] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinear##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int sizeZ,\ int strideZ,\ int strideZ1,\ int dstStrideZ,\ int activationType\ ) { \ int count = sizeZ;\ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz * strideZ;\ int srcOffset1 = iz * strideZ1;\ int dstOffset = iz * dstStrideZ;\ float x = input0[srcOffset];\ float y = input1[srcOffset1];\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 
0.0f : val);\ }\ output[dstOffset] = (TOut)val;\ }\ }\ #define BINARY_FUNC_FLOATMID4(Name, Func)\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinear4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int count_4, int activationType,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz << 2;\ int srcOffset1 = iz << 2;\ int dstOffset = iz << 2;\ float4 xx = inp0Broadcast ? make_float4(input0[0], input0[0], input0[0], input0[0]) : ((float4 *)(input0+srcOffset))[0];\ float4 yy = inp1Broadcast ? make_float4(input1[0], input1[0], input1[0], input1[0]) : ((float4 *)(input1+srcOffset1))[0];\ float x = xx.x;\ float y = yy.x;\ TOut val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset] = val;\ x = xx.y;\ y = yy.y;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset+1] = val;\ x = xx.z;\ y = yy.z;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset+2] = val;\ x = xx.w;\ y = yy.w;\ val = (TOut)(Func);\ if(activationType == 1) {\ val = (val < (TOut)0 ? (TOut)0 : val);\ }\ output[dstOffset+3] = val;\ }\ }\ template<typename TIn, typename TOut>\ __global__ void BinaryMidLinearHalf4_##Name(\ const TIn *input0, const TIn* input1, TOut *output,\ int count_4, int activationType,\ bool inp0Broadcast, bool inp1Broadcast\ ) { \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\ int iz = i;\ int srcOffset = iz << 2;\ int srcOffset1 = iz << 2;\ int dstOffset = iz << 2;\ half2 xx = inp0Broadcast ? make_half2(input0[0], input0[0]) : ((half2 *)(input0+srcOffset))[0];\ half2 yy = inp1Broadcast ? 
make_half2(input1[0], input1[0]) : ((half2 *)(input1+srcOffset1))[0];\ float x = (float)xx.x;\ float y = (float)yy.x;\ float val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset] = (TOut)val;\ x = (float)xx.y;\ y = (float)yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+1] = (TOut)val;\ xx = inp0Broadcast ? make_half2(input0[0], input0[0]) : ((half2 *)(input0+srcOffset))[1];\ yy = inp1Broadcast ? make_half2(input1[0], input1[0]) : ((half2 *)(input1+srcOffset1))[1];\ x = (float)xx.x;\ y = (float)yy.x;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+2] = (TOut)val;\ x = (float)xx.y;\ y = (float)yy.y;\ val = (float)(Func);\ if(activationType == 1) {\ val = (val < 0.0f ? 0.0f : val);\ }\ output[dstOffset+3] = (TOut)val;\ }\ }\ #define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? -1 : 0)) BINARY_FUNC(ADD, x+y); BINARY_FUNC(SUB, x-y); BINARY_FUNC(MUL, x*y); BINARY_FUNC(DIV, x/y); BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC(MINIMUM, min(x, y)); BINARY_FUNC(MAXIMUM, max(x, y)); BINARY_FUNC(GREATER, x > y ? 1 : 0); BINARY_FUNC(LESS, x < y ? 1 : 0); BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC(EQUAL, x == y ? 1 : 0); BINARY_FUNC(NOTEQUAL, x != y ? 1 : 0); BINARY_FUNC(FLOORDIV, floor(x / y)); BINARY_FUNC(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC(POW, pow(x, y)); BINARY_FUNC(ATAN2, atan2(x, y)); BINARY_FUNC(MOD, (x % y)); BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0); BINARY_FUNC_FLOATMID(ADD, x+y); BINARY_FUNC_FLOATMID(SUB, x-y); BINARY_FUNC_FLOATMID(MUL, x*y); BINARY_FUNC_FLOATMID(DIV, x/y); BINARY_FUNC_FLOATMID(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC_FLOATMID(MINIMUM, min(x, y)); BINARY_FUNC_FLOATMID(MAXIMUM, max(x, y)); BINARY_FUNC_FLOATMID(GREATER, x > y ? 
1 : 0); BINARY_FUNC_FLOATMID(LESS, x < y ? 1 : 0); BINARY_FUNC_FLOATMID(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC_FLOATMID(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC_FLOATMID(EQUAL, x == y ? 1 : 0); BINARY_FUNC_FLOATMID(NOTEQUAL, x != y ? 1 : 0); BINARY_FUNC_FLOATMID(FLOORDIV, floor(x / y)); BINARY_FUNC_FLOATMID(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC_FLOATMID(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC_FLOATMID(POW, pow(x, y)); BINARY_FUNC_FLOATMID(ATAN2, atan2(x, y)); BINARY_FUNC_FLOATMID(MOD, fmod(x, y)); BINARY_FUNC_FLOATMID(LOGICALOR, (x || y) ? 1 : 0); BINARY_FUNC_FLOATMID4(ADD, x+y); BINARY_FUNC_FLOATMID4(SUB, x-y); BINARY_FUNC_FLOATMID4(MUL, x*y); BINARY_FUNC_FLOATMID4(DIV, x/y); BINARY_FUNC_FLOATMID4(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001)); BINARY_FUNC_FLOATMID4(MINIMUM, min(x, y)); BINARY_FUNC_FLOATMID4(MAXIMUM, max(x, y)); BINARY_FUNC_FLOATMID4(GREATER, x > y ? 1 : 0); BINARY_FUNC_FLOATMID4(LESS, x < y ? 1 : 0); BINARY_FUNC_FLOATMID4(LESS_EQUAL, x <= y ? 1 : 0); BINARY_FUNC_FLOATMID4(GREATER_EQUAL, x >= y ? 1 : 0); BINARY_FUNC_FLOATMID4(EQUAL, x == y ? 1 : 0); BINARY_FUNC_FLOATMID4(NOTEQUAL, x != y ? 1 : 0); BINARY_FUNC_FLOATMID4(FLOORDIV, floor(x / y)); BINARY_FUNC_FLOATMID4(FLOORMOD, x - floor(x / y) * y); BINARY_FUNC_FLOATMID4(SquaredDifference, (x-y)*(x-y)); BINARY_FUNC_FLOATMID4(POW, pow(x, y)); BINARY_FUNC_FLOATMID4(ATAN2, atan2(x, y)); BINARY_FUNC_FLOATMID4(MOD, fmod(x, y)); BINARY_FUNC_FLOATMID4(LOGICALOR, (x || y) ? 
1 : 0); template<typename T> void BinaryBlitTemplateFloat(T* output, const T* input, const T* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType, int activationType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); // MNN_PRINT("binary :%d %d %d, %d %d %d, %d %d %d, %d %d %d, \n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], srcStride1[0], srcStride1[1], srcStride1[2], dstStride[0], dstStride[1], dstStride[2]); #define COMPUTE_FLOAT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ if (size[2] == count) {\ if(count % 4 == 0 && count > 16384 && (srcStride[2] == 0 || srcStride[2] == 1) && (srcStride1[2] == 0 || srcStride1[2] == 1) && dstStride[2] == 1) {\ block_num = runtime->blocks_num(count/4);\ threads_num = runtime->threads_num();\ bool srcBroadcast = srcStride[2] == 0;\ bool srcBroadcast1 = srcStride1[2] == 0;\ if(bytes == 4) {\ BinaryMidLinear4_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ count/4, activationType, srcBroadcast, srcBroadcast1);\ } else {\ BinaryMidLinearHalf4_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ count/4, activationType, srcBroadcast, srcBroadcast1);\ }\ } else {\ BinaryMidLinear##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ size[2],\ srcStride[2],\ srcStride1[2],\ dstStride[2],\ activationType);\ }\ } else {\ bool isVectorSizeZ = (size[0] == 1 || ((srcStride[2] == 0 || srcStride[0] % bytes == 0) && (srcStride1[2] == 0 || srcStride1[0] % bytes == 0) && dstStride[0] % bytes == 0));\ bool isVectorSizeY = (size[1] == 1 || ((srcStride[2] == 0 || srcStride[1] % bytes == 0) && (srcStride1[2] == 0 || srcStride1[1] % bytes == 0) && dstStride[1] % bytes == 0));\ bool isVector4 = size[2] % bytes == 0 && 
isVectorSizeZ && isVectorSizeY;\ if(isVector4 && count > 16384 && (srcStride[2] == 0 || srcStride[2] == 1) && (srcStride1[2] == 0 || srcStride1[2] == 1) && dstStride[2] == 1) {\ block_num = runtime->blocks_num(count/bytes);\ threads_num = runtime->threads_num();\ DivModFast sy(size[1]);\ DivModFast sx(size[2]/bytes);\ bool srcBroadcast = srcStride[2] == 0;\ bool srcBroadcast1 = srcStride1[2] == 0;\ if(bytes == 4) {\ BinaryMid4_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2]/4,\ srcStride[0], srcStride[1],\ srcStride1[0], srcStride1[1],\ dstStride[0], dstStride[1], activationType, sy, sx, srcBroadcast, srcBroadcast1);\ } else {\ BinaryMidHalf2_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2]/2,\ srcStride[0], srcStride[1],\ srcStride1[0], srcStride1[1],\ dstStride[0], dstStride[1], activationType, sy, sx, srcBroadcast, srcBroadcast1);\ }\ } else {\ DivModFast sy(size[1]);\ DivModFast sx(size[2]);\ BinaryMid##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2], activationType, sy, sx);\ }\ }\ return;\ }\ COMPUTE_FLOAT(ADD, T); COMPUTE_FLOAT(SUB, T); COMPUTE_FLOAT(MUL, T); COMPUTE_FLOAT(DIV, T); COMPUTE_FLOAT(REALDIV, T); COMPUTE_FLOAT(MINIMUM, T); COMPUTE_FLOAT(MAXIMUM, T); COMPUTE_FLOAT(GREATER, int); COMPUTE_FLOAT(LESS, int); COMPUTE_FLOAT(LESS_EQUAL, int); COMPUTE_FLOAT(GREATER_EQUAL, int); COMPUTE_FLOAT(EQUAL, int); COMPUTE_FLOAT(NOTEQUAL, int); COMPUTE_FLOAT(FLOORDIV, T); COMPUTE_FLOAT(FLOORMOD, T); COMPUTE_FLOAT(POW, T); COMPUTE_FLOAT(SquaredDifference, T); COMPUTE_FLOAT(ATAN2, T); COMPUTE_FLOAT(MOD, T); #undef COMPUTE_FLOAT } void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const 
int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType, int activationType) { int count = size[0] * size[1] * size[2]; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); #define COMPUTE_INT(TYPE, TOut)\ if (opType == MNN::BinaryOpOperation_##TYPE ) {\ Binary##TYPE<<<block_num, threads_num>>>((const int*)input, (const int*)(input1), (TOut*)output,\ size[0], size[1], size[2],\ srcStride[0], srcStride[1], srcStride[2],\ srcStride1[0], srcStride1[1], srcStride1[2],\ dstStride[0], dstStride[1], dstStride[2], activationType);\ return;\ }\ COMPUTE_INT(ADD, int); COMPUTE_INT(SUB, int); COMPUTE_INT(MUL, int); COMPUTE_INT(DIV, int); COMPUTE_INT(MINIMUM, int); COMPUTE_INT(MAXIMUM, int); COMPUTE_INT(GREATER, int); COMPUTE_INT(LESS, int); COMPUTE_INT(LESS_EQUAL, int); COMPUTE_INT(GREATER_EQUAL, int); COMPUTE_INT(EQUAL, int); COMPUTE_INT(NOTEQUAL, int); COMPUTE_INT(SquaredDifference, int); COMPUTE_INT(MOD, int); COMPUTE_INT(LOGICALOR, int); } void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType, int activationType) { if (type.code == halide_type_float) { if (type.bits == 32) { BinaryBlitTemplateFloat((float*)output, (float*)input, (float*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } else if (type.bits == 16) { BinaryBlitTemplateFloat((half*)output, (half*)input, (half*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } } else if (type.code == halide_type_int) { BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType, activationType); } } }// namespace CUDA }// namespace MNN
447f9d35ed5816798be4ed537a0249e7b4d3b208.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //************************************************************************ // __ __ _ _ _____ _____ // / | / || | | || \ / ___| // / |/ || |__| || _|| | _ // / /| /| || __ || |\ \ | |_| | // /_/ |_ / |_||_| |_||_| \_\|______| // // // Written by: Daniel L. Marino (marinodl@vcu.edu) (2016) // // Copyright (2016) Modern Heuristics Research Group (MHRG) // Virginia Commonwealth University (VCU), Richmond, VA // http://www.people.vcu.edu/~mmanic/ // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // Any opinions, findings, and conclusions or recommendations expressed // in this material are those of the author's(s') and do not necessarily // reflect the views of any other entity. 
// // *********************************************************************** // // Description: Test implementing 1-nearest neighbor classifier // // *********************************************************************** #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <float.h> #include <math.h> #include <iostream> #include "libarff/arff_parser.h" #include "libarff/arff_data.h" /* Includes, cuda */ #include "twodlearn/common/cuda/eigen_cuda.h" #include "twodlearn/common/cuda/matmul_pattern_cu.h" /* Includes, eigen */ #include "Eigen/Dense" using namespace std; using namespace Eigen; #define BLOCK_SIZE 12 typedef Matrix<double, Dynamic, Dynamic, RowMajor> MatrixXdR; void dataset2mat(ArffData* dataset, MatrixXdR& dataset_x, MatrixXdR& dataset_y){ // allocate memory dataset_x = MatrixXdR::Zero(dataset->num_instances(), dataset->num_attributes()); dataset_y = MatrixXdR::Zero(dataset->num_instances(), 1); // populate matrices for(int i = 0; i < dataset->num_instances(); i++){ for(int j = 0; j < dataset->num_attributes() - 1; j++){ dataset_x(i,j) = (double) dataset->get_instance(i)->get(j)->operator float(); } dataset_y(i,0) = (double) dataset->get_instance(i)->get(dataset->num_attributes()-1)->operator float(); } } int* gpu_knn( MatrixXdR& dataset_x, MatrixXdR& dataset_y){ hipEvent_t start_gpu, stop_gpu; // performance evaluation hipEventCreate(&start_gpu); hipEventCreate(&stop_gpu); // performance evaluation int* predictions = (int*)malloc(dataset_x.rows() * sizeof(int)); // calculate distance between all elements //MatrixXd dist = MatrixXd::Zero(dataset_x.rows(), dataset_x.rows()); TwinMat<double, RowMajor> a; a = dataset_x; cout << a.mat.rows() << " " << a.mat.cols() << " "<< a.obj_size << endl; //a.transfer_h2d(); TwinMat<double, RowMajor> b; MatrixXdR aux = dataset_x.transpose(); b = aux; //b = dataset_x.transpose(); //cout << b.mat.rows() << " " << b.mat.cols() << " "<< b.obj_size << endl; //b.transfer_h2d(); TwinMat<double, RowMajor> 
dist((int)a.mat.rows(), (int)b.mat.cols()); TwinMat<double, RowMajor> dist2((int)a.mat.rows(), (int)b.mat.cols()); hipEventRecord(start_gpu); // performance evaluation //dist.transfer_h2d(); cout << "Matrices allocated in cpu and gpu" << endl; // calculate distance between all elements a.transfer_h2d(); b.transfer_h2d(); cout << "Calculating distance matrix on GPU" << endl; SquaredDiffFunc<double> dist_cu; MulFunc<double> mul_cu; SumFunc<double> sum_cu; dim3 dim_grid( 1 + ((dist.mat.rows() -1)/BLOCK_SIZE), 1 + ((dist.mat.cols() -1)/BLOCK_SIZE), 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE,1); cout << dim_grid.x << " " << dim_grid.y << " " << BLOCK_SIZE << endl; hipLaunchKernelGGL(( matmul_pattern_cuda<SquaredDiffFunc<double>, SumFunc<double>, double, BLOCK_SIZE>) , dim3(dim_grid), dim3(dim_block), 0, 0, dist.device, a.device, b.device, a.mat.rows(), a.mat.cols(), b.mat.cols(), dist_cu, sum_cu); hipDeviceSynchronize(); dist.transfer_d2h(); hipEventRecord(stop_gpu); // performance evaluation hipEventSynchronize(stop_gpu); // performance evaluation float gpu_time_ms; // performance evaluation hipEventElapsedTime(&gpu_time_ms, start_gpu, stop_gpu); // performance evaluation cout << "time on gpu: " << gpu_time_ms << "[ms] \n"; // performance evaluation // calculate index with minimum distance int min_idx; for(int i=0; i<dist.mat.rows(); i++){ dist.mat(i,i)= 1e10; dist.mat.row(i).minCoeff(&min_idx); predictions[i] = dataset_y(min_idx,0); //cout << predictions[i] << endl; } return predictions; } int* KNN(ArffData* dataset){ int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int)); for(int i = 0; i < dataset->num_instances(); i++){ // for each instance in the dataset float smallestDistance = FLT_MAX; int smallestDistanceClass; for(int j = 0; j < dataset->num_instances(); j++){ // target each other instance if(i == j) continue; float distance = 0; for(int k = 0; k < dataset->num_attributes() - 1; k++){ // compute the distance between the two instances float diff 
= dataset->get_instance(i)->get(k)->operator float() - dataset->get_instance(j)->get(k)->operator float(); distance += diff * diff; } distance = sqrt(distance); if(distance < smallestDistance){ // select the closest one smallestDistance = distance; smallestDistanceClass = dataset->get_instance(j)->get(dataset->num_attributes() - 1)->operator int32(); } } predictions[i] = smallestDistanceClass; } return predictions; } int* computeConfusionMatrix(int* predictions, ArffData* dataset){ int* confusionMatrix = (int*)calloc(dataset->num_classes() * dataset->num_classes(), sizeof(int)); // matriz size numberClasses x numberClasses for(int i = 0; i < dataset->num_instances(); i++){ // for each instance compare the true class and predicted class int trueClass = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32(); int predictedClass = predictions[i]; confusionMatrix[trueClass*dataset->num_classes() + predictedClass]++; } return confusionMatrix; } float computeAccuracy(int* confusionMatrix, ArffData* dataset){ int successfulPredictions = 0; for(int i = 0; i < dataset->num_classes(); i++){ successfulPredictions += confusionMatrix[i*dataset->num_classes() + i]; // elements in the diagnoal are correct predictions } return successfulPredictions / (float) dataset->num_instances(); } int main(int argc, char *argv[]){ if(argc < 2 or argc > 3 ){ cout << "Usage: ./main datasets/datasetFile.arff num_threads" << endl; exit(0); } unsigned n_threads = 0; if(argc == 3) n_threads= atoi(argv[2]); ArffParser parser(argv[1]); ArffData *dataset = parser.parse(); struct timespec start, end; cout << "Number of instances: " << dataset->num_instances() << "\n"; cout << "Number of atributes: " << dataset->num_attributes() << "\n\n"; // ----------------------- serial code ------------------------// clock_gettime(CLOCK_MONOTONIC_RAW, &start); int* predictions = KNN(dataset); clock_gettime(CLOCK_MONOTONIC_RAW, &end); int* confusionMatrix = computeConfusionMatrix(predictions, 
dataset); float accuracy = computeAccuracy(confusionMatrix, dataset); uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6; printf("The 1NN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy); //----------------- GPU implementation ------------------------// MatrixXdR dataset_x, dataset_y; cout << "formating dataset ..." << endl; dataset2mat(dataset, dataset_x, dataset_y); cout << "formating Done" << endl; // For performance measure hipEvent_t start_gpu, stop_gpu; hipEventCreate(&start_gpu); hipEventCreate(&stop_gpu); // 3.1. run matrix multiplication on GPU cout << "\n\nRunning on GPU" << endl; hipEventRecord(start_gpu); int* predictions_gpu = gpu_knn( dataset_x, dataset_y ); hipEventRecord(stop_gpu); // evaluate accuracy int* confusionMatrix_gpu = computeConfusionMatrix(predictions_gpu, dataset); float accuracy_gpu = computeAccuracy(confusionMatrix_gpu, dataset); float gpu_time_ms; hipEventElapsedTime(&gpu_time_ms, start_gpu, stop_gpu); //cout << "time on gpu: " << gpu_time_ms << "[ms] \n"; printf("The 1NN classifier for %lu instances required %f ms GPU time, accuracy was %.4f\n", dataset->num_instances(), gpu_time_ms, accuracy_gpu); }
447f9d35ed5816798be4ed537a0249e7b4d3b208.cu
//************************************************************************ // __ __ _ _ _____ _____ // / | / || | | || \ / ___| // / |/ || |__| || _|| | _ // / /| /| || __ || |\ \ | |_| | // /_/ |_ / |_||_| |_||_| \_\|______| // // // Written by: Daniel L. Marino (marinodl@vcu.edu) (2016) // // Copyright (2016) Modern Heuristics Research Group (MHRG) // Virginia Commonwealth University (VCU), Richmond, VA // http://www.people.vcu.edu/~mmanic/ // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // Any opinions, findings, and conclusions or recommendations expressed // in this material are those of the author's(s') and do not necessarily // reflect the views of any other entity. 
// // *********************************************************************** // // Description: Test implementing 1-nearest neighbor classifier // // *********************************************************************** #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <float.h> #include <math.h> #include <iostream> #include "libarff/arff_parser.h" #include "libarff/arff_data.h" /* Includes, cuda */ #include "twodlearn/common/cuda/eigen_cuda.h" #include "twodlearn/common/cuda/matmul_pattern_cu.h" /* Includes, eigen */ #include "Eigen/Dense" using namespace std; using namespace Eigen; #define BLOCK_SIZE 12 typedef Matrix<double, Dynamic, Dynamic, RowMajor> MatrixXdR; void dataset2mat(ArffData* dataset, MatrixXdR& dataset_x, MatrixXdR& dataset_y){ // allocate memory dataset_x = MatrixXdR::Zero(dataset->num_instances(), dataset->num_attributes()); dataset_y = MatrixXdR::Zero(dataset->num_instances(), 1); // populate matrices for(int i = 0; i < dataset->num_instances(); i++){ for(int j = 0; j < dataset->num_attributes() - 1; j++){ dataset_x(i,j) = (double) dataset->get_instance(i)->get(j)->operator float(); } dataset_y(i,0) = (double) dataset->get_instance(i)->get(dataset->num_attributes()-1)->operator float(); } } int* gpu_knn( MatrixXdR& dataset_x, MatrixXdR& dataset_y){ cudaEvent_t start_gpu, stop_gpu; // performance evaluation cudaEventCreate(&start_gpu); cudaEventCreate(&stop_gpu); // performance evaluation int* predictions = (int*)malloc(dataset_x.rows() * sizeof(int)); // calculate distance between all elements //MatrixXd dist = MatrixXd::Zero(dataset_x.rows(), dataset_x.rows()); TwinMat<double, RowMajor> a; a = dataset_x; cout << a.mat.rows() << " " << a.mat.cols() << " "<< a.obj_size << endl; //a.transfer_h2d(); TwinMat<double, RowMajor> b; MatrixXdR aux = dataset_x.transpose(); b = aux; //b = dataset_x.transpose(); //cout << b.mat.rows() << " " << b.mat.cols() << " "<< b.obj_size << endl; //b.transfer_h2d(); TwinMat<double, RowMajor> 
dist((int)a.mat.rows(), (int)b.mat.cols()); TwinMat<double, RowMajor> dist2((int)a.mat.rows(), (int)b.mat.cols()); cudaEventRecord(start_gpu); // performance evaluation //dist.transfer_h2d(); cout << "Matrices allocated in cpu and gpu" << endl; // calculate distance between all elements a.transfer_h2d(); b.transfer_h2d(); cout << "Calculating distance matrix on GPU" << endl; SquaredDiffFunc<double> dist_cu; MulFunc<double> mul_cu; SumFunc<double> sum_cu; dim3 dim_grid( 1 + ((dist.mat.rows() -1)/BLOCK_SIZE), 1 + ((dist.mat.cols() -1)/BLOCK_SIZE), 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE,1); cout << dim_grid.x << " " << dim_grid.y << " " << BLOCK_SIZE << endl; matmul_pattern_cuda<SquaredDiffFunc<double>, SumFunc<double>, double, BLOCK_SIZE> <<<dim_grid, dim_block>>>(dist.device, a.device, b.device, a.mat.rows(), a.mat.cols(), b.mat.cols(), dist_cu, sum_cu); cudaDeviceSynchronize(); dist.transfer_d2h(); cudaEventRecord(stop_gpu); // performance evaluation cudaEventSynchronize(stop_gpu); // performance evaluation float gpu_time_ms; // performance evaluation cudaEventElapsedTime(&gpu_time_ms, start_gpu, stop_gpu); // performance evaluation cout << "time on gpu: " << gpu_time_ms << "[ms] \n"; // performance evaluation // calculate index with minimum distance int min_idx; for(int i=0; i<dist.mat.rows(); i++){ dist.mat(i,i)= 1e10; dist.mat.row(i).minCoeff(&min_idx); predictions[i] = dataset_y(min_idx,0); //cout << predictions[i] << endl; } return predictions; } int* KNN(ArffData* dataset){ int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int)); for(int i = 0; i < dataset->num_instances(); i++){ // for each instance in the dataset float smallestDistance = FLT_MAX; int smallestDistanceClass; for(int j = 0; j < dataset->num_instances(); j++){ // target each other instance if(i == j) continue; float distance = 0; for(int k = 0; k < dataset->num_attributes() - 1; k++){ // compute the distance between the two instances float diff = 
dataset->get_instance(i)->get(k)->operator float() - dataset->get_instance(j)->get(k)->operator float(); distance += diff * diff; } distance = sqrt(distance); if(distance < smallestDistance){ // select the closest one smallestDistance = distance; smallestDistanceClass = dataset->get_instance(j)->get(dataset->num_attributes() - 1)->operator int32(); } } predictions[i] = smallestDistanceClass; } return predictions; } int* computeConfusionMatrix(int* predictions, ArffData* dataset){ int* confusionMatrix = (int*)calloc(dataset->num_classes() * dataset->num_classes(), sizeof(int)); // matriz size numberClasses x numberClasses for(int i = 0; i < dataset->num_instances(); i++){ // for each instance compare the true class and predicted class int trueClass = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32(); int predictedClass = predictions[i]; confusionMatrix[trueClass*dataset->num_classes() + predictedClass]++; } return confusionMatrix; } float computeAccuracy(int* confusionMatrix, ArffData* dataset){ int successfulPredictions = 0; for(int i = 0; i < dataset->num_classes(); i++){ successfulPredictions += confusionMatrix[i*dataset->num_classes() + i]; // elements in the diagnoal are correct predictions } return successfulPredictions / (float) dataset->num_instances(); } int main(int argc, char *argv[]){ if(argc < 2 or argc > 3 ){ cout << "Usage: ./main datasets/datasetFile.arff num_threads" << endl; exit(0); } unsigned n_threads = 0; if(argc == 3) n_threads= atoi(argv[2]); ArffParser parser(argv[1]); ArffData *dataset = parser.parse(); struct timespec start, end; cout << "Number of instances: " << dataset->num_instances() << "\n"; cout << "Number of atributes: " << dataset->num_attributes() << "\n\n"; // ----------------------- serial code ------------------------// clock_gettime(CLOCK_MONOTONIC_RAW, &start); int* predictions = KNN(dataset); clock_gettime(CLOCK_MONOTONIC_RAW, &end); int* confusionMatrix = computeConfusionMatrix(predictions, 
dataset); float accuracy = computeAccuracy(confusionMatrix, dataset); uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6; printf("The 1NN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy); //----------------- GPU implementation ------------------------// MatrixXdR dataset_x, dataset_y; cout << "formating dataset ..." << endl; dataset2mat(dataset, dataset_x, dataset_y); cout << "formating Done" << endl; // For performance measure cudaEvent_t start_gpu, stop_gpu; cudaEventCreate(&start_gpu); cudaEventCreate(&stop_gpu); // 3.1. run matrix multiplication on GPU cout << "\n\nRunning on GPU" << endl; cudaEventRecord(start_gpu); int* predictions_gpu = gpu_knn( dataset_x, dataset_y ); cudaEventRecord(stop_gpu); // evaluate accuracy int* confusionMatrix_gpu = computeConfusionMatrix(predictions_gpu, dataset); float accuracy_gpu = computeAccuracy(confusionMatrix_gpu, dataset); float gpu_time_ms; cudaEventElapsedTime(&gpu_time_ms, start_gpu, stop_gpu); //cout << "time on gpu: " << gpu_time_ms << "[ms] \n"; printf("The 1NN classifier for %lu instances required %f ms GPU time, accuracy was %.4f\n", dataset->num_instances(), gpu_time_ms, accuracy_gpu); }
d34c88506bcb9bf2d749e0f01d9586261f1ad92b.hip
// !!! This is a file automatically generated by hipify!!! #include "GpuCholeSky.h" #include <hip/hip_runtime.h> #include "cuda_utils.h" #include <exception> namespace gpu_cholesky { enum { TILE_SIZE = 32 }; __device__ int lex_index_2D(int r, int c, int row_length) { return c + r*row_length; } __device__ int global_pos(int t_pos, int block_offset) { return t_pos + TILE_SIZE*block_offset; } // row majored matrix __device__ __forceinline__ float sqr(float a) { return a*a; } template<class T, int N> struct SmallDeviceMat { __device__ SmallDeviceMat(T* d) :A(d){} T* A; __device__ __forceinline__ T& operator()(int r, int c) { return A[r*N + c]; } __device__ __forceinline__ const T& operator()(int r, int c)const { return A[r*N + c]; } }; template<typename T, int N> __global__ void __single_thread_cholesky_batched(T *ptr, int stride, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A(ptr + tid*stride); for (unsigned int r = 0; r < N; ++r) { T sum = A(r, r); for (unsigned int u = 0; u < r; ++u) sum -= sqr(A(r, u)); T Lii = sqrt(sum); A(r, r) = Lii; for (unsigned int c = r + 1; c < N; ++c) { sum = A(c, r); for (unsigned int u = 0; u < r; ++u) sum -= A(c, u) * A(r, u); A(c, r) = sum / Lii; } } } template<typename T, int N> __global__ void __single_thread_tril_inv_batched(T *ptr, int stride, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A(ptr + tid*stride); for (int i = 0; i < N; ++i) { A(i, i) = 1.f / A(i, i); for (int j = i + 1; j < N; j++) { float sum = 0.f; for (int k = i; k < j; k++) sum -= A(j, k) * A(k, i); A(j, i) = sum / A(j, j); } } } template<typename T, int N> __global__ void __single_thread_LtL_batched(T* outputLLt, int strideLLt, const T *inputL, int strideL, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A((T*)inputL + tid*strideL); SmallDeviceMat<T, N> 
C(outputLLt + tid*strideLLt); for (int y = 0; y < N; ++y) { for (int x = 0; x <= y; ++x) { T sum = 0.f; for (int k = y; k<N; k++) sum += A(k, y) * A(k, x); C(y, x) = C(x, y) = sum; } } } template<typename T> __global__ void __factorize_diagonal_block(T *A, int block_offset, int global_row_length) { int col = threadIdx.x; int row = threadIdx.y; int global_row = global_pos(row, block_offset); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); __shared__ T L[TILE_SIZE][TILE_SIZE + 1]; L[row][col] = A[idx]; __syncthreads(); T fac; for (int k = 0; k < TILE_SIZE; k++) { __syncthreads(); fac = rsqrtf(L[k][k]); __syncthreads(); if ((row == k) && (col >= k)) L[col][row] = (L[col][row])*fac; __syncthreads(); if ((row >= col) && (col > k)) L[row][col] -= L[col][k] * L[row][k]; } __syncthreads(); if (row >= col) A[idx] = L[row][col]; } template<typename T> __global__ void __strip_update(T *A, int block_offset, int global_row_length) { int boffy = block_offset; int boffx = blockIdx.x + boffy + 1; int col = threadIdx.x; int row = threadIdx.y; __shared__ T topleft[TILE_SIZE][TILE_SIZE + 1]; __shared__ T workingmat[TILE_SIZE][TILE_SIZE + 1]; int global_row = global_pos(row, block_offset); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); topleft[row][col] = A[idx]; global_row = global_pos(row, boffx); int idx_w = lex_index_2D(global_row, global_col, global_row_length); workingmat[col][row] = A[idx_w]; __syncthreads(); if (row == 0) for (int k = 0; k < TILE_SIZE; k++) { T sum = 0.; for (int m = 0; m < k; m++) sum += topleft[k][m] * workingmat[m][col]; workingmat[k][col] = (workingmat[k][col] - sum) / topleft[k][k]; } __syncthreads(); A[idx_w] = workingmat[col][row]; } template<typename T> __global__ void __diag_update(T *A, int block_offset, int global_row_length) { int boffx = blockIdx.x + block_offset + 1; int col = threadIdx.x; int row = threadIdx.y; 
int global_row = global_pos(row, boffx); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); __shared__ T left[TILE_SIZE][TILE_SIZE + 1]; left[row][col] = A[idx]; __syncthreads(); T sum = 0.f; if (row >= col) { for (int kk = 0; kk < TILE_SIZE; kk++) sum += left[row][kk] * left[col][kk]; global_col = global_pos(col, boffx); idx = lex_index_2D(global_row, global_col, global_row_length); A[idx] -= sum; } } template<typename T> __global__ void __lo_update(T *A, int block_offset, int n_blocks, int global_row_length) { int col = threadIdx.x; int row = threadIdx.y; int boffy = blockIdx.y + block_offset + 1; int boffx = boffy + 1; __shared__ T left[TILE_SIZE][TILE_SIZE]; __shared__ T upt[TILE_SIZE][TILE_SIZE + 1]; int global_row = global_pos(row, boffy); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); upt[row][col] = A[idx]; for (; boffx < n_blocks; boffx++) { global_row = global_pos(row, boffx); idx = lex_index_2D(global_row, global_col, global_row_length); left[row][col] = A[idx]; __syncthreads(); T matrixprod = 0.f; for (int kk = 0; kk < TILE_SIZE; kk++) matrixprod += left[row][kk] * upt[col][kk]; __syncthreads(); global_col = global_pos(col, boffy); idx = lex_index_2D(global_row, global_col, global_row_length); A[idx] -= matrixprod; } } template<typename T, int N> void _single_thread_cholesky_batched(T *ptr, int stride, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_cholesky_batched<T, N> << <grid, block >> >( ptr, stride, batchSize); cudaSafeCall(hipGetLastError(), "__single_thread_cholesky_batched"); } template<typename T, int N> void _single_thread_tril_inv_batched(T *ptr, int stride, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_tril_inv_batched<T, N> << <grid, block >> >( ptr, stride, batchSize); cudaSafeCall(hipGetLastError(), 
"__single_thread_tril_inv_batched"); } template<typename T, int N> void _single_thread_LtL_batched(T* outputLLt, int strideLLt, const T *inputL, int strideL, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_LtL_batched<T, N> << <grid, block >> >( outputLLt, strideLLt, inputL, strideL, batchSize); cudaSafeCall(hipGetLastError(), "__single_thread_LtL_batched"); } template<typename T> hipError_t _factorize_diagonal_block(T * a_d, int block_offset, int n_rows_padded) { dim3 threads(TILE_SIZE, TILE_SIZE); __factorize_diagonal_block << <1, threads >> >(a_d, block_offset, n_rows_padded); hipDeviceSynchronize(); return hipGetLastError(); } template<typename T> void _strip_update(T *a_d, int block_offset, int n_remaining_blocks, int n_rows_padded) { hipError_t error; dim3 stripgrid(n_remaining_blocks - 1); dim3 threads(TILE_SIZE, TILE_SIZE); __strip_update << <stripgrid, threads >> >(a_d, block_offset, n_rows_padded); hipDeviceSynchronize(); error = hipGetLastError(); if (error != hipSuccess) { printf(" Error code %d: %s.\n", error, hipGetErrorString(error)); exit(-1); } } template<typename T> void _diag_update(T *a_d, int block_offset, int n_rows_padded, int n_remaining_blocks) { hipError_t error; dim3 stripgrid(n_remaining_blocks - 1); dim3 threads(TILE_SIZE, TILE_SIZE); __diag_update << <stripgrid, threads >> >(a_d, block_offset, n_rows_padded); hipDeviceSynchronize(); error = hipGetLastError(); if (error != hipSuccess) { printf(" Error code %d: %s.\n", error, hipGetErrorString(error)); exit(-1); } } template<typename T> void _lo_update(T *a_d, int block_offset, int n_blocks, int n_rows_padded, int n_remaining_blocks) { hipError_t error; dim3 logrid; logrid.x = 1; logrid.y = n_remaining_blocks - 2; dim3 threads(TILE_SIZE, TILE_SIZE); __lo_update << < logrid, threads >> >(a_d, block_offset, n_blocks, n_rows_padded); hipDeviceSynchronize(); error = hipGetLastError(); if (error != hipSuccess) { printf(" Error code %d: %s.\n", 
error, hipGetErrorString(error)); exit(-1); } } template<typename T> void _cholesky(T * a_d, int n_rows) { hipError_t error; int n_blocks = (n_rows + int(TILE_SIZE) - 1) / int(TILE_SIZE); int n_rows_padded = n_blocks*TILE_SIZE; dim3 threads(TILE_SIZE, TILE_SIZE); dim3 logrid; for (int i = n_blocks; i > 2; --i) { logrid.x = 1; logrid.y = i - 2; dim3 stripgrid(i - 1); __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - i, n_rows_padded); hipDeviceSynchronize(); __strip_update << <stripgrid, threads >> >(a_d, n_blocks - i, n_rows_padded); hipDeviceSynchronize(); __diag_update << <stripgrid, threads >> >(a_d, n_blocks - i, n_rows_padded); hipDeviceSynchronize(); __lo_update << < logrid, threads >> >(a_d, n_blocks - i, n_blocks, n_rows_padded); hipDeviceSynchronize(); } if (n_blocks > 1) { __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); hipDeviceSynchronize(); __strip_update << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); hipDeviceSynchronize(); __diag_update << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); hipDeviceSynchronize(); } __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - 1, n_rows_padded); hipDeviceSynchronize(); error = hipGetLastError(); if (error != hipSuccess) { printf(" Error code %d: %s.\n", error, hipGetErrorString(error)); exit(-1); } } hipError_t factorize_diagonal_block(float *A, int block_offset, int global_row_length) { return _factorize_diagonal_block(A, block_offset, global_row_length); } void strip_update(float *A, int block_offset, int n_remaining_blocks, int n_rows_padded) { _strip_update(A, block_offset, n_remaining_blocks, n_rows_padded); } void diag_update(float *A, int block_offset, int global_row_length, int n_remaining_blocks) { _diag_update(A, block_offset, global_row_length, n_remaining_blocks); } void lo_update(float *A, int block_offset, int n_blocks, int global_row_length, int n_remaining_blocks) { _lo_update(A, block_offset, n_blocks, global_row_length, 
n_remaining_blocks); } void cholesky(float * a_d, int n_rows) { _cholesky(a_d, n_rows); } void single_thread_cholesky_batched(float *ptr, int nMatRowCol, int stride, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_cholesky_batched<float, 1>(ptr, stride, batchSize); break; case 2: _single_thread_cholesky_batched<float, 2>(ptr, stride, batchSize); break; case 3: _single_thread_cholesky_batched<float, 3>(ptr, stride, batchSize); break; case 4: _single_thread_cholesky_batched<float, 4>(ptr, stride, batchSize); break; case 5: _single_thread_cholesky_batched<float, 5>(ptr, stride, batchSize); break; case 6: _single_thread_cholesky_batched<float, 6>(ptr, stride, batchSize); break; case 7: _single_thread_cholesky_batched<float, 7>(ptr, stride, batchSize); break; case 8: _single_thread_cholesky_batched<float, 8>(ptr, stride, batchSize); break; case 9: _single_thread_cholesky_batched<float, 9>(ptr, stride, batchSize); break; default: throw std::exception("error: non-supported size in _single_thread_cholesky_batched"); } } void single_thread_tril_inv_batched(float *ptr, int nMatRowCol, int stride, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_tril_inv_batched<float, 1>(ptr, stride, batchSize); break; case 2: _single_thread_tril_inv_batched<float, 2>(ptr, stride, batchSize); break; case 3: _single_thread_tril_inv_batched<float, 3>(ptr, stride, batchSize); break; case 4: _single_thread_tril_inv_batched<float, 4>(ptr, stride, batchSize); break; case 5: _single_thread_tril_inv_batched<float, 5>(ptr, stride, batchSize); break; case 6: _single_thread_tril_inv_batched<float, 6>(ptr, stride, batchSize); break; case 7: _single_thread_tril_inv_batched<float, 7>(ptr, stride, batchSize); break; case 8: _single_thread_tril_inv_batched<float, 8>(ptr, stride, batchSize); break; case 9: _single_thread_tril_inv_batched<float, 9>(ptr, stride, batchSize); break; default: throw std::exception("error: non-supported size in 
single_thread_tril_inv_batched"); } } void single_thread_LtL_batched(float* outputLLt, int strideLLt, const float *inputL, int strideL, int nMatRowCol, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_LtL_batched<float, 1>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 2: _single_thread_LtL_batched<float, 2>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 3: _single_thread_LtL_batched<float, 3>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 4: _single_thread_LtL_batched<float, 4>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 5: _single_thread_LtL_batched<float, 5>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 6: _single_thread_LtL_batched<float, 6>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 7: _single_thread_LtL_batched<float, 7>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 8: _single_thread_LtL_batched<float, 8>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 9: _single_thread_LtL_batched<float, 9>(outputLLt, strideLLt, inputL, strideL, batchSize); break; default: throw std::exception("error: non-supported size in single_thread_LtL_batched"); } } }
d34c88506bcb9bf2d749e0f01d9586261f1ad92b.cu
#include "GpuCholeSky.h" #include <cuda.h> #include "cuda_utils.h" #include <exception> namespace gpu_cholesky { enum { TILE_SIZE = 32 }; __device__ int lex_index_2D(int r, int c, int row_length) { return c + r*row_length; } __device__ int global_pos(int t_pos, int block_offset) { return t_pos + TILE_SIZE*block_offset; } // row majored matrix __device__ __forceinline__ float sqr(float a) { return a*a; } template<class T, int N> struct SmallDeviceMat { __device__ SmallDeviceMat(T* d) :A(d){} T* A; __device__ __forceinline__ T& operator()(int r, int c) { return A[r*N + c]; } __device__ __forceinline__ const T& operator()(int r, int c)const { return A[r*N + c]; } }; template<typename T, int N> __global__ void __single_thread_cholesky_batched(T *ptr, int stride, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A(ptr + tid*stride); for (unsigned int r = 0; r < N; ++r) { T sum = A(r, r); for (unsigned int u = 0; u < r; ++u) sum -= sqr(A(r, u)); T Lii = sqrt(sum); A(r, r) = Lii; for (unsigned int c = r + 1; c < N; ++c) { sum = A(c, r); for (unsigned int u = 0; u < r; ++u) sum -= A(c, u) * A(r, u); A(c, r) = sum / Lii; } } } template<typename T, int N> __global__ void __single_thread_tril_inv_batched(T *ptr, int stride, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A(ptr + tid*stride); for (int i = 0; i < N; ++i) { A(i, i) = 1.f / A(i, i); for (int j = i + 1; j < N; j++) { float sum = 0.f; for (int k = i; k < j; k++) sum -= A(j, k) * A(k, i); A(j, i) = sum / A(j, j); } } } template<typename T, int N> __global__ void __single_thread_LtL_batched(T* outputLLt, int strideLLt, const T *inputL, int strideL, int batchSize) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid >= batchSize) return; SmallDeviceMat<T, N> A((T*)inputL + tid*strideL); SmallDeviceMat<T, N> C(outputLLt + tid*strideLLt); for (int y = 0; y < N; ++y) { for (int x = 0; x 
<= y; ++x) { T sum = 0.f; for (int k = y; k<N; k++) sum += A(k, y) * A(k, x); C(y, x) = C(x, y) = sum; } } } template<typename T> __global__ void __factorize_diagonal_block(T *A, int block_offset, int global_row_length) { int col = threadIdx.x; int row = threadIdx.y; int global_row = global_pos(row, block_offset); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); __shared__ T L[TILE_SIZE][TILE_SIZE + 1]; L[row][col] = A[idx]; __syncthreads(); T fac; for (int k = 0; k < TILE_SIZE; k++) { __syncthreads(); fac = rsqrtf(L[k][k]); __syncthreads(); if ((row == k) && (col >= k)) L[col][row] = (L[col][row])*fac; __syncthreads(); if ((row >= col) && (col > k)) L[row][col] -= L[col][k] * L[row][k]; } __syncthreads(); if (row >= col) A[idx] = L[row][col]; } template<typename T> __global__ void __strip_update(T *A, int block_offset, int global_row_length) { int boffy = block_offset; int boffx = blockIdx.x + boffy + 1; int col = threadIdx.x; int row = threadIdx.y; __shared__ T topleft[TILE_SIZE][TILE_SIZE + 1]; __shared__ T workingmat[TILE_SIZE][TILE_SIZE + 1]; int global_row = global_pos(row, block_offset); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); topleft[row][col] = A[idx]; global_row = global_pos(row, boffx); int idx_w = lex_index_2D(global_row, global_col, global_row_length); workingmat[col][row] = A[idx_w]; __syncthreads(); if (row == 0) for (int k = 0; k < TILE_SIZE; k++) { T sum = 0.; for (int m = 0; m < k; m++) sum += topleft[k][m] * workingmat[m][col]; workingmat[k][col] = (workingmat[k][col] - sum) / topleft[k][k]; } __syncthreads(); A[idx_w] = workingmat[col][row]; } template<typename T> __global__ void __diag_update(T *A, int block_offset, int global_row_length) { int boffx = blockIdx.x + block_offset + 1; int col = threadIdx.x; int row = threadIdx.y; int global_row = global_pos(row, boffx); int global_col = global_pos(col, 
block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); __shared__ T left[TILE_SIZE][TILE_SIZE + 1]; left[row][col] = A[idx]; __syncthreads(); T sum = 0.f; if (row >= col) { for (int kk = 0; kk < TILE_SIZE; kk++) sum += left[row][kk] * left[col][kk]; global_col = global_pos(col, boffx); idx = lex_index_2D(global_row, global_col, global_row_length); A[idx] -= sum; } } template<typename T> __global__ void __lo_update(T *A, int block_offset, int n_blocks, int global_row_length) { int col = threadIdx.x; int row = threadIdx.y; int boffy = blockIdx.y + block_offset + 1; int boffx = boffy + 1; __shared__ T left[TILE_SIZE][TILE_SIZE]; __shared__ T upt[TILE_SIZE][TILE_SIZE + 1]; int global_row = global_pos(row, boffy); int global_col = global_pos(col, block_offset); int idx = lex_index_2D(global_row, global_col, global_row_length); upt[row][col] = A[idx]; for (; boffx < n_blocks; boffx++) { global_row = global_pos(row, boffx); idx = lex_index_2D(global_row, global_col, global_row_length); left[row][col] = A[idx]; __syncthreads(); T matrixprod = 0.f; for (int kk = 0; kk < TILE_SIZE; kk++) matrixprod += left[row][kk] * upt[col][kk]; __syncthreads(); global_col = global_pos(col, boffy); idx = lex_index_2D(global_row, global_col, global_row_length); A[idx] -= matrixprod; } } template<typename T, int N> void _single_thread_cholesky_batched(T *ptr, int stride, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_cholesky_batched<T, N> << <grid, block >> >( ptr, stride, batchSize); cudaSafeCall(cudaGetLastError(), "__single_thread_cholesky_batched"); } template<typename T, int N> void _single_thread_tril_inv_batched(T *ptr, int stride, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_tril_inv_batched<T, N> << <grid, block >> >( ptr, stride, batchSize); cudaSafeCall(cudaGetLastError(), "__single_thread_tril_inv_batched"); } template<typename T, int N> void 
_single_thread_LtL_batched(T* outputLLt, int strideLLt, const T *inputL, int strideL, int batchSize) { dim3 block(TILE_SIZE); dim3 grid(divUp(batchSize, block.x)); __single_thread_LtL_batched<T, N> << <grid, block >> >( outputLLt, strideLLt, inputL, strideL, batchSize); cudaSafeCall(cudaGetLastError(), "__single_thread_LtL_batched"); } template<typename T> cudaError_t _factorize_diagonal_block(T * a_d, int block_offset, int n_rows_padded) { dim3 threads(TILE_SIZE, TILE_SIZE); __factorize_diagonal_block << <1, threads >> >(a_d, block_offset, n_rows_padded); cudaThreadSynchronize(); return cudaGetLastError(); } template<typename T> void _strip_update(T *a_d, int block_offset, int n_remaining_blocks, int n_rows_padded) { cudaError_t error; dim3 stripgrid(n_remaining_blocks - 1); dim3 threads(TILE_SIZE, TILE_SIZE); __strip_update << <stripgrid, threads >> >(a_d, block_offset, n_rows_padded); cudaThreadSynchronize(); error = cudaGetLastError(); if (error != cudaSuccess) { printf(" Error code %d: %s.\n", error, cudaGetErrorString(error)); exit(-1); } } template<typename T> void _diag_update(T *a_d, int block_offset, int n_rows_padded, int n_remaining_blocks) { cudaError_t error; dim3 stripgrid(n_remaining_blocks - 1); dim3 threads(TILE_SIZE, TILE_SIZE); __diag_update << <stripgrid, threads >> >(a_d, block_offset, n_rows_padded); cudaThreadSynchronize(); error = cudaGetLastError(); if (error != cudaSuccess) { printf(" Error code %d: %s.\n", error, cudaGetErrorString(error)); exit(-1); } } template<typename T> void _lo_update(T *a_d, int block_offset, int n_blocks, int n_rows_padded, int n_remaining_blocks) { cudaError_t error; dim3 logrid; logrid.x = 1; logrid.y = n_remaining_blocks - 2; dim3 threads(TILE_SIZE, TILE_SIZE); __lo_update << < logrid, threads >> >(a_d, block_offset, n_blocks, n_rows_padded); cudaThreadSynchronize(); error = cudaGetLastError(); if (error != cudaSuccess) { printf(" Error code %d: %s.\n", error, cudaGetErrorString(error)); exit(-1); } } 
template<typename T> void _cholesky(T * a_d, int n_rows) { cudaError_t error; int n_blocks = (n_rows + int(TILE_SIZE) - 1) / int(TILE_SIZE); int n_rows_padded = n_blocks*TILE_SIZE; dim3 threads(TILE_SIZE, TILE_SIZE); dim3 logrid; for (int i = n_blocks; i > 2; --i) { logrid.x = 1; logrid.y = i - 2; dim3 stripgrid(i - 1); __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - i, n_rows_padded); cudaThreadSynchronize(); __strip_update << <stripgrid, threads >> >(a_d, n_blocks - i, n_rows_padded); cudaThreadSynchronize(); __diag_update << <stripgrid, threads >> >(a_d, n_blocks - i, n_rows_padded); cudaThreadSynchronize(); __lo_update << < logrid, threads >> >(a_d, n_blocks - i, n_blocks, n_rows_padded); cudaThreadSynchronize(); } if (n_blocks > 1) { __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); cudaThreadSynchronize(); __strip_update << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); cudaThreadSynchronize(); __diag_update << <1, threads >> >(a_d, n_blocks - 2, n_rows_padded); cudaThreadSynchronize(); } __factorize_diagonal_block << <1, threads >> >(a_d, n_blocks - 1, n_rows_padded); cudaThreadSynchronize(); error = cudaGetLastError(); if (error != cudaSuccess) { printf(" Error code %d: %s.\n", error, cudaGetErrorString(error)); exit(-1); } } cudaError_t factorize_diagonal_block(float *A, int block_offset, int global_row_length) { return _factorize_diagonal_block(A, block_offset, global_row_length); } void strip_update(float *A, int block_offset, int n_remaining_blocks, int n_rows_padded) { _strip_update(A, block_offset, n_remaining_blocks, n_rows_padded); } void diag_update(float *A, int block_offset, int global_row_length, int n_remaining_blocks) { _diag_update(A, block_offset, global_row_length, n_remaining_blocks); } void lo_update(float *A, int block_offset, int n_blocks, int global_row_length, int n_remaining_blocks) { _lo_update(A, block_offset, n_blocks, global_row_length, n_remaining_blocks); } void cholesky(float 
* a_d, int n_rows) { _cholesky(a_d, n_rows); } void single_thread_cholesky_batched(float *ptr, int nMatRowCol, int stride, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_cholesky_batched<float, 1>(ptr, stride, batchSize); break; case 2: _single_thread_cholesky_batched<float, 2>(ptr, stride, batchSize); break; case 3: _single_thread_cholesky_batched<float, 3>(ptr, stride, batchSize); break; case 4: _single_thread_cholesky_batched<float, 4>(ptr, stride, batchSize); break; case 5: _single_thread_cholesky_batched<float, 5>(ptr, stride, batchSize); break; case 6: _single_thread_cholesky_batched<float, 6>(ptr, stride, batchSize); break; case 7: _single_thread_cholesky_batched<float, 7>(ptr, stride, batchSize); break; case 8: _single_thread_cholesky_batched<float, 8>(ptr, stride, batchSize); break; case 9: _single_thread_cholesky_batched<float, 9>(ptr, stride, batchSize); break; default: throw std::exception("error: non-supported size in _single_thread_cholesky_batched"); } } void single_thread_tril_inv_batched(float *ptr, int nMatRowCol, int stride, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_tril_inv_batched<float, 1>(ptr, stride, batchSize); break; case 2: _single_thread_tril_inv_batched<float, 2>(ptr, stride, batchSize); break; case 3: _single_thread_tril_inv_batched<float, 3>(ptr, stride, batchSize); break; case 4: _single_thread_tril_inv_batched<float, 4>(ptr, stride, batchSize); break; case 5: _single_thread_tril_inv_batched<float, 5>(ptr, stride, batchSize); break; case 6: _single_thread_tril_inv_batched<float, 6>(ptr, stride, batchSize); break; case 7: _single_thread_tril_inv_batched<float, 7>(ptr, stride, batchSize); break; case 8: _single_thread_tril_inv_batched<float, 8>(ptr, stride, batchSize); break; case 9: _single_thread_tril_inv_batched<float, 9>(ptr, stride, batchSize); break; default: throw std::exception("error: non-supported size in single_thread_tril_inv_batched"); } } void 
single_thread_LtL_batched(float* outputLLt, int strideLLt, const float *inputL, int strideL, int nMatRowCol, int batchSize) { switch (nMatRowCol) { case 0: return; case 1: _single_thread_LtL_batched<float, 1>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 2: _single_thread_LtL_batched<float, 2>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 3: _single_thread_LtL_batched<float, 3>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 4: _single_thread_LtL_batched<float, 4>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 5: _single_thread_LtL_batched<float, 5>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 6: _single_thread_LtL_batched<float, 6>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 7: _single_thread_LtL_batched<float, 7>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 8: _single_thread_LtL_batched<float, 8>(outputLLt, strideLLt, inputL, strideL, batchSize); break; case 9: _single_thread_LtL_batched<float, 9>(outputLLt, strideLLt, inputL, strideL, batchSize); break; default: throw std::exception("error: non-supported size in single_thread_LtL_batched"); } } }
99d0c8ed2243e36061fca0f3f0a636c0be5a8742.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void cudaMultVectorsKernel(int N, float *x, float *y, float *z) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { z[idx] = x[idx] * y[idx]; } // idx = idx + blockDim.x * gridDim.x; // we will discuss this later... } // extern "C" is necessary because nvcc uses c++ compiler to compile cuda code // hence applies name mangling. Because we use gcc for linking, we should // prevent name mangling. extern "C" void runKernel(int N, float *x, float *y, float *z) { hipLaunchKernelGGL(( cudaMultVectorsKernel), dim3((N+511)/512), dim3(512), 0, 0, N, x, y, z); }
99d0c8ed2243e36061fca0f3f0a636c0be5a8742.cu
#include <stdio.h> #include <stdlib.h> __global__ void cudaMultVectorsKernel(int N, float *x, float *y, float *z) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { z[idx] = x[idx] * y[idx]; } // idx = idx + blockDim.x * gridDim.x; // we will discuss this later... } // extern "C" is necessary because nvcc uses c++ compiler to compile cuda code // hence applies name mangling. Because we use gcc for linking, we should // prevent name mangling. extern "C" void runKernel(int N, float *x, float *y, float *z) { cudaMultVectorsKernel<<<(N+511)/512, 512>>>(N, x, y, z); }
b7e1733339a6424f9fb3f0a80e2f8adb5fb3bd22.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for PageRank. */ #include <gunrock/app/pr/pr_app.cu> #include <gunrock/app/test_base.cuh> using namespace gunrock; /****************************************************************************** * Main ******************************************************************************/ /** * @brief Enclosure to the main function */ struct main_struct { /** * @brief the actual main function, after type switching * @tparam VertexT Type of vertex identifier * @tparam SizeT Type of graph size, i.e. type of edge identifier * @tparam ValueT Type of edge values * @param parameters Command line parameters * @param v,s,val Place holders for type deduction * \return hipError_t error message(s), if any */ template <typename VertexT, // Use int as the vertex identifier typename SizeT, // Use int as the graph size type typename ValueT> // Use int as the value type hipError_t operator()(util::Parameters &parameters, VertexT v, SizeT s, ValueT val) { typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_COO | graph::HAS_CSC> GraphT; // typedef typename GraphT::CooT CooT; hipError_t retval = hipSuccess; bool quick = parameters.Get<bool>("quick"); bool quiet = parameters.Get<bool>("quiet"); std::string validation = parameters.Get<std::string>("validation"); util::CpuTimer cpu_timer; GraphT graph; // graph we process on // require undirected input graph when unnormalized if (!parameters.Get<bool>("normalize")) { util::PrintMsg( "Directed graph is only supported by normalized PR," " 
Changing graph type to undirected.", quiet); parameters.Set("undirected", true); } cpu_timer.Start(); GUARD_CU(graphio::LoadGraph(parameters, graph)); cpu_timer.Stop(); parameters.Set("load-time", cpu_timer.ElapsedMillis()); std::vector<bool> compensate_vec = parameters.Get<std::vector<bool>>("compensate"); for (auto it = compensate_vec.begin(); it != compensate_vec.end(); it++) { bool compensate = *it; if (compensate) { GUARD_CU(gunrock::app::pr::Compensate_ZeroDegrees(graph, quiet)); } GUARD_CU(parameters.Set("compensate", compensate)); std::vector<std::string> switches{"normalize", "delta", "threshold", "max-iter", "pull"}; GUARD_CU(app::Switch_Parameters( parameters, graph, switches, [quick, quiet, validation](util::Parameters &parameters, GraphT &graph) { hipError_t retval = hipSuccess; GUARD_CU(app::Set_Srcs(parameters, graph)); ValueT **ref_ranks = NULL; VertexT **ref_vertices = NULL; int num_srcs = 0; if (quick && (parameters.UseDefault("validation") == false && validation != "none")) { util::PrintMsg("Invalid options --quick and --validation=" + validation + ", no CPU reference result to validate"); return retval; } // compute reference CPU SSSP solution for source-distance if (!quick) { util::PrintMsg("Computing reference value ...", !quiet); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); num_srcs = srcs.size(); SizeT nodes = graph.nodes; ref_ranks = (ValueT **)malloc(sizeof(ValueT *) * num_srcs); ref_vertices = (VertexT **)malloc(sizeof(VertexT *) * num_srcs); for (int i = 0; i < num_srcs; i++) { ref_ranks[i] = (ValueT *)malloc(sizeof(ValueT) * nodes); ref_vertices[i] = (VertexT *)malloc(sizeof(VertexT) * nodes); VertexT src = srcs[i]; util::PrintMsg("__________________________", !quiet); float elapsed = app::pr::CPU_Reference( parameters, graph, src, ref_vertices[i], ref_ranks[i]); util::PrintMsg("--------------------------\nRun " + std::to_string(i) + " elapsed: " + std::to_string(elapsed) + " ms, src = " + 
std::to_string(src), !quiet); } } std::vector<std::string> switches2{"scale", "advance-mode"}; GUARD_CU(app::Switch_Parameters( parameters, graph, switches2, [ref_ranks, ref_vertices](util::Parameters &parameters, GraphT &graph) { return app::pr::RunTests(parameters, graph, ref_vertices, ref_ranks); })); if (!quick) { for (int i = 0; i < num_srcs; i++) { free(ref_ranks[i]); ref_ranks[i] = NULL; free(ref_vertices[i]); ref_vertices[i] = NULL; } free(ref_ranks); ref_ranks = NULL; free(ref_vertices); ref_vertices = NULL; } return retval; })); } GUARD_CU(parameters.Set("compensate", compensate_vec)); return retval; } }; int main(int argc, char **argv) { hipError_t retval = hipSuccess; util::Parameters parameters("test pr"); GUARD_CU(graphio::UseParameters(parameters)); GUARD_CU(app::pr::UseParameters(parameters)); GUARD_CU(app::UseParameters_test(parameters)); GUARD_CU(parameters.Parse_CommandLine(argc, argv)); if (parameters.Get<bool>("help")) { parameters.Print_Help(); return hipSuccess; } GUARD_CU(parameters.Check_Required()); return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B | app::SIZET_U32B | // app::SIZET_U64B | app::VALUET_F64B | // app::VALUET_F64B | app::DIRECTED | app::UNDIRECTED>(parameters, main_struct()); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
b7e1733339a6424f9fb3f0a80e2f8adb5fb3bd22.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for PageRank. */ #include <gunrock/app/pr/pr_app.cu> #include <gunrock/app/test_base.cuh> using namespace gunrock; /****************************************************************************** * Main ******************************************************************************/ /** * @brief Enclosure to the main function */ struct main_struct { /** * @brief the actual main function, after type switching * @tparam VertexT Type of vertex identifier * @tparam SizeT Type of graph size, i.e. type of edge identifier * @tparam ValueT Type of edge values * @param parameters Command line parameters * @param v,s,val Place holders for type deduction * \return cudaError_t error message(s), if any */ template <typename VertexT, // Use int as the vertex identifier typename SizeT, // Use int as the graph size type typename ValueT> // Use int as the value type cudaError_t operator()(util::Parameters &parameters, VertexT v, SizeT s, ValueT val) { typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_COO | graph::HAS_CSC> GraphT; // typedef typename GraphT::CooT CooT; cudaError_t retval = cudaSuccess; bool quick = parameters.Get<bool>("quick"); bool quiet = parameters.Get<bool>("quiet"); std::string validation = parameters.Get<std::string>("validation"); util::CpuTimer cpu_timer; GraphT graph; // graph we process on // require undirected input graph when unnormalized if (!parameters.Get<bool>("normalize")) { util::PrintMsg( "Directed graph is only supported by normalized PR," " Changing graph type to undirected.", quiet); 
parameters.Set("undirected", true); } cpu_timer.Start(); GUARD_CU(graphio::LoadGraph(parameters, graph)); cpu_timer.Stop(); parameters.Set("load-time", cpu_timer.ElapsedMillis()); std::vector<bool> compensate_vec = parameters.Get<std::vector<bool>>("compensate"); for (auto it = compensate_vec.begin(); it != compensate_vec.end(); it++) { bool compensate = *it; if (compensate) { GUARD_CU(gunrock::app::pr::Compensate_ZeroDegrees(graph, quiet)); } GUARD_CU(parameters.Set("compensate", compensate)); std::vector<std::string> switches{"normalize", "delta", "threshold", "max-iter", "pull"}; GUARD_CU(app::Switch_Parameters( parameters, graph, switches, [quick, quiet, validation](util::Parameters &parameters, GraphT &graph) { cudaError_t retval = cudaSuccess; GUARD_CU(app::Set_Srcs(parameters, graph)); ValueT **ref_ranks = NULL; VertexT **ref_vertices = NULL; int num_srcs = 0; if (quick && (parameters.UseDefault("validation") == false && validation != "none")) { util::PrintMsg("Invalid options --quick and --validation=" + validation + ", no CPU reference result to validate"); return retval; } // compute reference CPU SSSP solution for source-distance if (!quick) { util::PrintMsg("Computing reference value ...", !quiet); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); num_srcs = srcs.size(); SizeT nodes = graph.nodes; ref_ranks = (ValueT **)malloc(sizeof(ValueT *) * num_srcs); ref_vertices = (VertexT **)malloc(sizeof(VertexT *) * num_srcs); for (int i = 0; i < num_srcs; i++) { ref_ranks[i] = (ValueT *)malloc(sizeof(ValueT) * nodes); ref_vertices[i] = (VertexT *)malloc(sizeof(VertexT) * nodes); VertexT src = srcs[i]; util::PrintMsg("__________________________", !quiet); float elapsed = app::pr::CPU_Reference( parameters, graph, src, ref_vertices[i], ref_ranks[i]); util::PrintMsg("--------------------------\nRun " + std::to_string(i) + " elapsed: " + std::to_string(elapsed) + " ms, src = " + std::to_string(src), !quiet); } } std::vector<std::string> 
switches2{"scale", "advance-mode"}; GUARD_CU(app::Switch_Parameters( parameters, graph, switches2, [ref_ranks, ref_vertices](util::Parameters &parameters, GraphT &graph) { return app::pr::RunTests(parameters, graph, ref_vertices, ref_ranks); })); if (!quick) { for (int i = 0; i < num_srcs; i++) { free(ref_ranks[i]); ref_ranks[i] = NULL; free(ref_vertices[i]); ref_vertices[i] = NULL; } free(ref_ranks); ref_ranks = NULL; free(ref_vertices); ref_vertices = NULL; } return retval; })); } GUARD_CU(parameters.Set("compensate", compensate_vec)); return retval; } }; int main(int argc, char **argv) { cudaError_t retval = cudaSuccess; util::Parameters parameters("test pr"); GUARD_CU(graphio::UseParameters(parameters)); GUARD_CU(app::pr::UseParameters(parameters)); GUARD_CU(app::UseParameters_test(parameters)); GUARD_CU(parameters.Parse_CommandLine(argc, argv)); if (parameters.Get<bool>("help")) { parameters.Print_Help(); return cudaSuccess; } GUARD_CU(parameters.Check_Required()); return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B | app::SIZET_U32B | // app::SIZET_U64B | app::VALUET_F64B | // app::VALUET_F64B | app::DIRECTED | app::UNDIRECTED>(parameters, main_struct()); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
4b5e02b31c175f63363aa3e9df9e38f7d7efa416.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Box-blur kernel for a 3-channel interleaved image stored as float.
// Each thread handles one pixel: for every channel k it averages the
// samples inside a square window of half-width BLUR_SIZE centered on
// the pixel, clipped to the image bounds.
// NOTE(review): BLUR_SIZE is presumably a macro from includes.h giving
// the window half-width — confirm against that header.
// Pixel layout (from the indexing below): row-major, channel-interleaved,
// element (row, col, channel k) at input[3*(row*width+col)+k].
// Expected launch: 2D grid where the x dimension spans rows (height)
// and the y dimension spans columns (width).
__global__ void blur( float * input, float * output, int height, int width)
{
    // x indexes the row (compared against height), y the column.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<height && y<width)
    {
        // Process the three interleaved channels independently.
        for(int k=0;k<3;k++)
        {
            float sum=0;
            // count tracks only in-bounds neighbors, so border pixels
            // average over the valid portion of the window (count >= 1,
            // since the center pixel itself is always in bounds).
            int count=0;
            for(int i=x-BLUR_SIZE; i<= x+BLUR_SIZE; i++)
            {
                for(int j= y-BLUR_SIZE; j<=y+BLUR_SIZE;j++)
                {
                    if(i>=0 && i<height && j>=0 && j<width)
                    {
                        count++;
                        sum+=input[3*(i*width+j)+k];
                    }
                }
            }
            // Float division: sum is float, count promotes.
            output[3*(x*width+y)+k]=sum/count;
        }
    }
    else
        // Threads outside the image do nothing.
        return ;
}
4b5e02b31c175f63363aa3e9df9e38f7d7efa416.cu
#include "includes.h"

// Box-blur kernel for a 3-channel interleaved image stored as float.
// Each thread handles one pixel: for every channel k it averages the
// samples inside a square window of half-width BLUR_SIZE centered on
// the pixel, clipped to the image bounds.
// NOTE(review): BLUR_SIZE is presumably a macro from includes.h giving
// the window half-width — confirm against that header.
// Pixel layout (from the indexing below): row-major, channel-interleaved,
// element (row, col, channel k) at input[3*(row*width+col)+k].
// Expected launch: 2D grid where the x dimension spans rows (height)
// and the y dimension spans columns (width).
__global__ void blur( float * input, float * output, int height, int width)
{
    // x indexes the row (compared against height), y the column.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<height && y<width)
    {
        // Process the three interleaved channels independently.
        for(int k=0;k<3;k++)
        {
            float sum=0;
            // count tracks only in-bounds neighbors, so border pixels
            // average over the valid portion of the window (count >= 1,
            // since the center pixel itself is always in bounds).
            int count=0;
            for(int i=x-BLUR_SIZE; i<= x+BLUR_SIZE; i++)
            {
                for(int j= y-BLUR_SIZE; j<=y+BLUR_SIZE;j++)
                {
                    if(i>=0 && i<height && j>=0 && j<width)
                    {
                        count++;
                        sum+=input[3*(i*width+j)+k];
                    }
                }
            }
            // Float division: sum is float, count promotes.
            output[3*(x*width+y)+k]=sum/count;
        }
    }
    else
        // Threads outside the image do nothing.
        return ;
}
c3aed083e13d5dd4afd109e960050397fe9ac4da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. 
//////////////////////////////////////////////////////////////////////////////// #define LBANN_SOFTMAX_LAYER_INSTANTIATE #include "lbann/layers/activations/softmax.hpp" #include "lbann/utils/gpu/helpers.hpp" #ifdef LBANN_HAS_DNN_LIB #include "lbann/utils/dnn_lib/softmax.hpp" #endif // LBANN_HAS_DNN_LIB namespace lbann { namespace { #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD /** Functor to ensure values are above threshold value */ template <typename TensorDataType> struct threshold_op { __forceinline__ __device__ TensorDataType operator()(const TensorDataType& y) const { return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>())); } }; #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; /** @brief Kernel for max reduction on matrix columns * * Each CUDA block computes the max over a subset of matrix entries * and outputs the result. This is repeated multiple times for * column-wise max reduction. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param values (height x width) matrix * @param max_values (nblocksx x width) matrix */ template <size_t bsize, typename TensorDataType> __global__ void reduce_max_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ max_values) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Find largest value for each thread TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& val = values[row+col*values_ldim]; thread_max_val = gpu_lib::max(thread_max_val, val); } // Find largest value for each block const TensorDataType block_max_val = gpu_lib::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val); if (tid == 0) { max_values[bidx+col*nblocksx] = block_max_val; } } } /** @brief Compute exp(x-shift) * * Also compute sum(exp(x-shift)) for each matrix column. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 */ template <size_t bsize, typename TensorDataType> __global__ void fp_exp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { const auto& shift = shifts[col]; // Exponentiate inputs and compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row+col*input_ldim]; auto& y = output[row+col*output_ldim]; y = gpu_lib::exp(x-shift); thread_sum += y; } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute layer output * * y = exp(x-shift) / sum(exp(x-shift)) * * If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are * thresholded to a minimum value to avoid denormalized floats. * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param output On input, constains exp(x-shift). On output, * contains the layer output. 
* @param sums sum(exp(x-shift)) for each column */ template <typename TensorDataType> __global__ void fp_output_kernel(size_t height, size_t width, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ sums) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& denom = sums[col]; for (size_t row = gidx; row < height; row += nthreadsx) { auto& y = output[row+col*output_ldim]; y /= denom; #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>())); #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD } } } /** @brief Compute dot(y,dy) for each matrix column * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 */ template <size_t bsize, typename TensorDataType> __global__ void bp_dot_product_kernel( size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ dot_products) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Compute dot product contribution for each thread TensorDataType thread_dot_product{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row+col*output_ldim]; const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim]; thread_dot_product += y * dy; } // Compute dot product contribution for each block const TensorDataType block_dot_product = gpu_lib::block_reduce<bsize,1,1>(thread_dot_product); if (tid == 0) 
{ gpu_lib::atomic_add(&dot_products[col], block_dot_product); } } } /** @brief Compute gradient w.r.t. input * * dx = y * (dy - dot(y,dy)) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param dot_products dot(y,dy) for each matrix column */ template <size_t bsize, typename TensorDataType> __global__ void bp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ dot_products, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& y_dot_dy = dot_products[col]; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row+col*output_ldim]; const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim]; dx = y * (dy - y_dot_dy); } } } #ifdef LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout Layout, El::Device Device> void fp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) { dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations()); } template <typename TensorDataType, data_layout Layout, El::Device Device> void bp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) { dc.m_softmax->backward(dc.get_activations(), dc.get_prev_error_signals(), dc.get_error_signals()); } #endif // LBANN_HAS_DISTCONV } // namespace template <typename TensorDataType> void fp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { #ifdef LBANN_HAS_DISTCONV if 
(l.distconv_enabled()) { fp_compute_distconv(l.get_distconv_adapter()); return; } #endif // LBANN_HAS_DISTCONV const dnn_lib::ScalingParamType<TensorDataType> zero = 0.; const dnn_lib::ScalingParamType<TensorDataType> one = 1.; const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); if (!local_input.IsEmpty()) { dnn_lib::softmax_forward(one, l.m_tensors_dnn_desc.get_prev_activations(), local_input, zero, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_mode); #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output, local_output); #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD } } template <typename TensorDataType> void bp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { #ifdef LBANN_HAS_DISTCONV if (l.distconv_enabled()) { bp_compute_distconv(l.get_distconv_adapter()); return; } #endif // LBANN_HAS_DISTCONV const dnn_lib::ScalingParamType<TensorDataType> zero = 0.; const dnn_lib::ScalingParamType<TensorDataType> one = 1.; const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals()); dnn_lib::softmax_backward(one, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_tensors_dnn_desc.get_prev_error_signals(), local_gradient_wrt_output, zero, l.m_tensors_dnn_desc.get_error_signals(), local_gradient_wrt_input, l.m_mode); } template <typename TensorDataType> void fp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { if(l.m_mode != 
softmax_mode::INSTANCE) { LBANN_ERROR("Unsupported softmax mode"); } // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix()); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_workspace)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Find max value in each column gpu_lib::thrust::vector<TensorDataType> max_vals; if (local_output.IsEmpty()) { max_vals.resize(local_width, -std::numeric_limits<TensorDataType>::infinity()); } else { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get()); while (grid_dims.x > 1) { const size_t prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals)); max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), prev_height, max_vals.data().get()); } } El::mpi::AllReduce(max_vals.data().get(), max_vals.size(), 
El::mpi::MAX, l.m_workspace->RedundantComm(), sync_info); // Compute exp(x-max_val) and sum(exp(x-max_val)) El::Zero(*l.m_workspace); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( fp_exp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), max_vals.data().get(), local_workspace.Buffer()); } El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute output // Note: y = exp(x-max_val) / sum(exp(x-max_val)) if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.Buffer(), local_output.LDim(), local_workspace.LockedBuffer()); } } template <typename TensorDataType> void bp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { if(l.m_mode != softmax_mode::INSTANCE) { LBANN_ERROR("Unsupported softmax mode"); } // Local matrices const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals()); auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix()); const auto& local_height = local_output.Height(); const auto& local_width = local_output.Width(); 
// GPU objects auto multisync = El::MakeMultiSync( gpu::get_sync_info(local_output), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_workspace)); // Compute dot(y,dy) El::Zero(local_workspace); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( bp_dot_product_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer()); } El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute gradient w.r.t. input if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( bp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::setup_fp_dnn_descriptors() { } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::setup_bp_dnn_descriptors() { } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::fp_compute() { fp_compute_impl(*this); } template <typename TensorDataType, data_layout Layout, El::Device Device> void 
softmax_layer<TensorDataType, Layout, Device>::bp_compute() { bp_compute_impl(*this); } // Template instantiation #define PROTO(T) \ template class softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
c3aed083e13d5dd4afd109e960050397fe9ac4da.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. 
//////////////////////////////////////////////////////////////////////////////// #define LBANN_SOFTMAX_LAYER_INSTANTIATE #include "lbann/layers/activations/softmax.hpp" #include "lbann/utils/gpu/helpers.hpp" #ifdef LBANN_HAS_DNN_LIB #include "lbann/utils/dnn_lib/softmax.hpp" #endif // LBANN_HAS_DNN_LIB namespace lbann { namespace { #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD /** Functor to ensure values are above threshold value */ template <typename TensorDataType> struct threshold_op { __forceinline__ __device__ TensorDataType operator()(const TensorDataType& y) const { return gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>())); } }; #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; /** @brief Kernel for max reduction on matrix columns * * Each CUDA block computes the max over a subset of matrix entries * and outputs the result. This is repeated multiple times for * column-wise max reduction. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param values (height x width) matrix * @param max_values (nblocksx x width) matrix */ template <size_t bsize, typename TensorDataType> __global__ void reduce_max_kernel(size_t height, size_t width, const TensorDataType* __restrict__ values, size_t values_ldim, TensorDataType* __restrict__ max_values) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidx = blockIdx.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksx = gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Find largest value for each thread TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& val = values[row+col*values_ldim]; thread_max_val = gpu_lib::max(thread_max_val, val); } // Find largest value for each block const TensorDataType block_max_val = gpu_lib::block_reduce<bsize,1,1,DataType,max_op<DataType>>(thread_max_val); if (tid == 0) { max_values[bidx+col*nblocksx] = block_max_val; } } } /** @brief Compute exp(x-shift) * * Also compute sum(exp(x-shift)) for each matrix column. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 */ template <size_t bsize, typename TensorDataType> __global__ void fp_exp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ sums) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { const auto& shift = shifts[col]; // Exponentiate inputs and compute sum for each thread TensorDataType thread_sum{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& x = input[row+col*input_ldim]; auto& y = output[row+col*output_ldim]; y = gpu_lib::exp(x-shift); thread_sum += y; } // Compute sum for each block const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum); if (tid == 0) { gpu_lib::atomic_add(&sums[col], block_sum); } } } /** @brief Compute layer output * * y = exp(x-shift) / sum(exp(x-shift)) * * If @c LBANN_ENABLE_SOFTMAX_THRESHOLD is set, small values are * thresholded to a minimum value to avoid denormalized floats. * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param output On input, constains exp(x-shift). On output, * contains the layer output. 
* @param sums sum(exp(x-shift)) for each column */ template <typename TensorDataType> __global__ void fp_output_kernel(size_t height, size_t width, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ sums) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& denom = sums[col]; for (size_t row = gidx; row < height; row += nthreadsx) { auto& y = output[row+col*output_ldim]; y /= denom; #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD y = gpu_lib::max(y, gpu_lib::sqrt(gpu_lib::min<TensorDataType>())); #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD } } } /** @brief Compute dot(y,dy) for each matrix column * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 */ template <size_t bsize, typename TensorDataType> __global__ void bp_dot_product_kernel( size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ dot_products) { // Indices const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t bidy = blockIdx.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nblocksy = gridDim.y; for (size_t col = bidy; col < width; col += nblocksy) { // Compute dot product contribution for each thread TensorDataType thread_dot_product{0}; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row+col*output_ldim]; const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim]; thread_dot_product += y * dy; } // Compute dot product contribution for each block const TensorDataType block_dot_product = gpu_lib::block_reduce<bsize,1,1>(thread_dot_product); if (tid == 0) 
{ gpu_lib::atomic_add(&dot_products[col], block_dot_product); } } } /** @brief Compute gradient w.r.t. input * * dx = y * (dy - dot(y,dy)) * * Block dimensions: bsize x 1 x 1 * * Grid dimension: (height / bsize) x width x 1 * * @param dot_products dot(y,dy) for each matrix column */ template <size_t bsize, typename TensorDataType> __global__ void bp_kernel(size_t height, size_t width, const TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ dot_products, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t col = gidy; col < width; col += nthreadsy) { const auto& y_dot_dy = dot_products[col]; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& y = output[row+col*output_ldim]; const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim]; dx = y * (dy - y_dot_dy); } } } #ifdef LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout Layout, El::Device Device> void fp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) { dc.m_softmax->forward(dc.get_prev_activations(), dc.get_activations()); } template <typename TensorDataType, data_layout Layout, El::Device Device> void bp_compute_distconv(softmax_distconv_adapter<TensorDataType, Layout, Device> &dc) { dc.m_softmax->backward(dc.get_activations(), dc.get_prev_error_signals(), dc.get_error_signals()); } #endif // LBANN_HAS_DISTCONV } // namespace template <typename TensorDataType> void fp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { #ifdef LBANN_HAS_DISTCONV if 
(l.distconv_enabled()) { fp_compute_distconv(l.get_distconv_adapter()); return; } #endif // LBANN_HAS_DISTCONV const dnn_lib::ScalingParamType<TensorDataType> zero = 0.; const dnn_lib::ScalingParamType<TensorDataType> one = 1.; const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); if (!local_input.IsEmpty()) { dnn_lib::softmax_forward(one, l.m_tensors_dnn_desc.get_prev_activations(), local_input, zero, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_mode); #ifdef LBANN_ENABLE_SOFTMAX_THRESHOLD gpu_lib::apply_entrywise_unary_operator<threshold_op>(local_output, local_output); #endif // LBANN_ENABLE_SOFTMAX_THRESHOLD } } template <typename TensorDataType> void bp_compute_impl(softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) { #ifdef LBANN_HAS_DISTCONV if (l.distconv_enabled()) { bp_compute_distconv(l.get_distconv_adapter()); return; } #endif // LBANN_HAS_DISTCONV const dnn_lib::ScalingParamType<TensorDataType> zero = 0.; const dnn_lib::ScalingParamType<TensorDataType> one = 1.; const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals()); dnn_lib::softmax_backward(one, l.m_tensors_dnn_desc.get_activations(), local_output, l.m_tensors_dnn_desc.get_prev_error_signals(), local_gradient_wrt_output, zero, l.m_tensors_dnn_desc.get_error_signals(), local_gradient_wrt_input, l.m_mode); } template <typename TensorDataType> void fp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { if(l.m_mode != 
softmax_mode::INSTANCE) { LBANN_ERROR("Unsupported softmax mode"); } // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix()); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input), gpu::get_sync_info(local_output), gpu::get_sync_info(local_workspace)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Find max value in each column gpu_lib::thrust::vector<TensorDataType> max_vals; if (local_output.IsEmpty()) { max_vals.resize(local_width, -std::numeric_limits<TensorDataType>::infinity()); } else { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), max_vals.data().get()); while (grid_dims.x > 1) { const size_t prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals)); max_vals.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), prev_height, max_vals.data().get()); } } El::mpi::AllReduce(max_vals.data().get(), max_vals.size(), 
El::mpi::MAX, l.m_workspace->RedundantComm(), sync_info); // Compute exp(x-max_val) and sum(exp(x-max_val)) El::Zero(*l.m_workspace); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( fp_exp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), max_vals.data().get(), local_workspace.Buffer()); } El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute output // Note: y = exp(x-max_val) / sum(exp(x-max_val)) if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.Buffer(), local_output.LDim(), local_workspace.LockedBuffer()); } } template <typename TensorDataType> void bp_compute_impl(softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) { if(l.m_mode != softmax_mode::INSTANCE) { LBANN_ERROR("Unsupported softmax mode"); } // Local matrices const auto& local_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_error_signals()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_error_signals()); auto& local_workspace = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.m_workspace->Matrix()); const auto& local_height = local_output.Height(); const auto& local_width = local_output.Width(); 
// GPU objects auto multisync = El::MakeMultiSync( gpu::get_sync_info(local_output), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_workspace)); // Compute dot(y,dy) El::Zero(local_workspace); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( bp_dot_product_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer()); } El::AllReduce(*l.m_workspace, l.m_workspace->RedundantComm()); // Compute gradient w.r.t. input if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel( bp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_output.LockedBuffer(), local_output.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_workspace.Buffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::setup_fp_dnn_descriptors() { } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::setup_bp_dnn_descriptors() { } template <typename TensorDataType, data_layout Layout, El::Device Device> void softmax_layer<TensorDataType, Layout, Device>::fp_compute() { fp_compute_impl(*this); } template <typename TensorDataType, data_layout Layout, El::Device Device> void 
softmax_layer<TensorDataType, Layout, Device>::bp_compute() { bp_compute_impl(*this); } // Template instantiation #define PROTO(T) \ template class softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
ae20ba34cb1b12cb04f7491316647d123157eeb9.hip
// !!! This is a file automatically generated by hipify!!! /** * CUDA C/C++ implementation for Parallel Graph Coloring for Manycore Architectures * {@link https://ieeexplore.ieee.org/abstract/document/7516086} * * @author Ashwin Joisa * @author Praveen Gupta **/ //=============================================================================================// // Include header files #include <iostream> #include <hip/hip_runtime.h> // Include custom header file for implementation of Graphs #include "Graph.h" //=============================================================================================// #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) //=============================================================================================// using namespace std; float device_time_taken; //=============================================================================================// // Catch Cuda errors void catchCudaError(hipError_t error, const char *function) { if (error != hipSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, hipGetErrorString(error), function); exit(-1); } } //=============================================================================================// __global__ void assignColoursKernel(Graph *graph, int nodeCount, int *device_colours, bool *device_conflicts, int maxDegree) { int node = blockIdx.x * blockDim.x + threadIdx.x; if (node >= nodeCount || !device_conflicts[node]) return; int maxColours = maxDegree + 1; // Create forbidden array of size maxDegree int *forbidden = new int[CEIL(maxColours + 1, 32)]; if(forbidden == NULL) { cout << "Cuda Memory Full\n"; return; } memset(forbidden, 0, sizeof(int) * CEIL(maxColours + 1, 32)); for (int i = graph->adjacencyListPointers[node]; i < graph->adjacencyListPointers[node + 1]; i++) { int neighbour = graph->adjacencyList[i]; int ind = device_colours[neighbour] % 32; forbidden[device_colours[neighbour] / 32] |= (1<<ind); } for (int colour = 1; 
colour <= maxColours; ++colour) { int ind = colour % 32; if ((forbidden[colour / 32] & (1<<ind)) == 0) { device_colours[node] = colour; break; } } delete[] forbidden; } void assignColours(Graph *graph, int nodeCount, int *device_colours, bool *device_conflicts, int maxDegree) { // Launch assignColoursKernel with nodeCount number of threads hipLaunchKernelGGL(( assignColoursKernel), dim3(CEIL(nodeCount, MAX_THREAD_COUNT)), dim3(MAX_THREAD_COUNT), 0, 0, graph, nodeCount, device_colours, device_conflicts, maxDegree); hipDeviceSynchronize(); } __global__ void detectConflictsKernel(Graph *graph, int nodeCount, int *device_colours, bool *device_conflicts, bool *device_conflictExists) { int node = blockIdx.x * blockDim.x + threadIdx.x; if (node >= nodeCount) return; device_conflicts[node] = false; for (int i = graph->adjacencyListPointers[node]; i < graph->adjacencyListPointers[node + 1]; i++) { int neighbour = graph->adjacencyList[i]; if (device_colours[neighbour] == device_colours[node] && neighbour < node) { //conflict device_conflicts[node] = true; *device_conflictExists = true; } } } bool detectConflicts(Graph *graph, int nodeCount, int *device_colours, bool *device_conflicts) { bool *device_conflictExists; bool conflictExists = false; catchCudaError(hipMalloc((void **)&device_conflictExists, sizeof(bool)), "Malloc1"); catchCudaError(hipMemcpy(device_conflictExists, &conflictExists, sizeof(bool), hipMemcpyHostToDevice), "Memcpy7"); //Launch detectConflictsKernel with nodeCount number of threads hipLaunchKernelGGL(( detectConflictsKernel), dim3(CEIL(nodeCount, MAX_THREAD_COUNT)), dim3(MAX_THREAD_COUNT), 0, 0, graph, nodeCount, device_colours, device_conflicts, device_conflictExists); hipDeviceSynchronize(); // Copy device_conflictExists to conflictExists and return catchCudaError(hipMemcpy(&conflictExists, device_conflictExists, sizeof(bool), hipMemcpyDeviceToHost), "Memcpy6"); // Free device memory catchCudaError(hipFree(device_conflictExists), "Free"); return 
conflictExists; } int *graphColouring(Graph *graph, int nodeCount, int maxDegree) { // Boolean array for conflicts bool *host_conflicts = new bool[nodeCount]; int *host_colours = new int[nodeCount]; int *device_colours; bool *device_conflicts; // Initialize all nodes to invalid colour (0) memset(host_colours, 0, sizeof(int) * nodeCount); // Initialize all nodes into conflict memset(host_conflicts, true, sizeof(bool) * nodeCount); catchCudaError(hipMalloc((void **)&device_colours, sizeof(int) * nodeCount), "Malloc2"); catchCudaError(hipMemcpy(device_colours, host_colours, sizeof(int) * nodeCount, hipMemcpyHostToDevice), "Memcpy1"); catchCudaError(hipMalloc((void **)&device_conflicts, sizeof(bool) * nodeCount), "Malloc3"); catchCudaError(hipMemcpy(device_conflicts, host_conflicts, sizeof(bool) * nodeCount, hipMemcpyHostToDevice), "Memcpy2"); // Timer hipEvent_t device_start, device_end; catchCudaError(hipEventCreate(&device_start), "Event Create"); catchCudaError(hipEventCreate(&device_end), "Event Create"); catchCudaError(hipEventRecord(device_start), "Event Record"); do { assignColours(graph, nodeCount, device_colours, device_conflicts, maxDegree); } while (detectConflicts(graph, nodeCount, device_colours, device_conflicts)); // Timer catchCudaError(hipEventRecord(device_end), "Event Record"); catchCudaError(hipEventSynchronize(device_end), "Event Synchronize"); catchCudaError(hipEventElapsedTime(&device_time_taken, device_start, device_end), "Elapsed time"); // Copy colours to host and return catchCudaError(hipMemcpy(host_colours, device_colours, sizeof(int) * nodeCount, hipMemcpyDeviceToHost), "Memcpy3"); delete[] host_conflicts; catchCudaError(hipFree(device_colours), "Free"); catchCudaError(hipFree(device_conflicts), "Free"); return host_colours; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the colouring of the graph? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph)), "Malloc4"); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); int maxDegree = host_graph->getMaxDegree(); catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice), "Memcpy4"); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)), "Malloc5"); catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice), "Memcpy"); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice), "Memcpy5"); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)), "Malloc6"); catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice), "Memcpy"); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice), "Memcpy"); int *colouring = graphColouring(device_graph, nodeCount, maxDegree); int chromaticNumber = INT_MIN; for (int i = 0; i < nodeCount; i++) { chromaticNumber = max(chromaticNumber, colouring[i]); if(choice == 'y' || choice == 'Y') printf("Node %d => Colour %d\n", i, colouring[i]); } cout << endl; printf("\nNumber of colours used (chromatic number) ==> %d\n", chromaticNumber); printf("Time Taken (Parallel) = %f ms\n", device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; 
i++) cout << colouring[i] << " "; cout << endl; } // Free all memory delete[] colouring; catchCudaError(hipFree(adjacencyList), "Free"); catchCudaError(hipFree(adjacencyListPointers), "Free"); catchCudaError(hipFree(device_graph), "Free"); }
ae20ba34cb1b12cb04f7491316647d123157eeb9.cu
/**
 * CUDA C/C++ implementation for Parallel Graph Coloring for Manycore Architectures
 * {@link https://ieeexplore.ieee.org/abstract/document/7516086}
 *
 * @author Ashwin Joisa
 * @author Praveen Gupta
 **/
//=============================================================================================//
// Include header files
#include <iostream>
#include <cuda.h>

// Include custom header file for implementation of Graphs
#include "Graph.h"
//=============================================================================================//

#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
//=============================================================================================//

using namespace std;

float device_time_taken;
//=============================================================================================//

// Catch Cuda errors
// Aborts on the first failing call so that later calls do not fail
// mysteriously with a stale ("sticky") error code.
void catchCudaError(cudaError_t error, const char *function)
{
    if (error != cudaSuccess)
    {
        printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error), function);
        exit(-1);
    }
}
//=============================================================================================//

/**
 * One thread per node: pick the smallest colour in [1, maxDegree+1] not used
 * by any neighbour. Only nodes currently flagged as conflicting re-colour
 * themselves; neighbour colours are read without synchronisation, which the
 * iterative conflict-detection pass tolerates by design.
 *
 * Launch layout: 1-D grid of 1-D blocks of MAX_THREAD_COUNT threads.
 */
__global__ void assignColoursKernel(Graph *graph, int nodeCount,
                                    int *device_colours, bool *device_conflicts, int maxDegree)
{
    int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= nodeCount || !device_conflicts[node])
        return;

    int maxColours = maxDegree + 1;

    // Bitset of forbidden colours: one bit per colour, 32 colours per int.
    // Allocated from the device heap, so a NULL check is required.
    int *forbidden = new int[CEIL(maxColours + 1, 32)];
    if (forbidden == NULL)
    {
        // FIX: the original streamed to std::cout here, which is not callable
        // from device code; printf is the supported device-side output API.
        printf("Cuda Memory Full\n");
        return;
    }
    memset(forbidden, 0, sizeof(int) * CEIL(maxColours + 1, 32));

    // Mark every neighbour's current colour as forbidden.
    for (int i = graph->adjacencyListPointers[node]; i < graph->adjacencyListPointers[node + 1]; i++)
    {
        int neighbour = graph->adjacencyList[i];
        int ind = device_colours[neighbour] % 32;
        forbidden[device_colours[neighbour] / 32] |= (1 << ind);
    }

    // Take the smallest colour whose bit is still clear. Colour 0 means
    // "uncoloured", so valid colours start at 1.
    for (int colour = 1; colour <= maxColours; ++colour)
    {
        int ind = colour % 32;
        if ((forbidden[colour / 32] & (1 << ind)) == 0)
        {
            device_colours[node] = colour;
            break;
        }
    }

    delete[] forbidden;
}

// Host wrapper: re-colour all conflicting nodes, then wait for completion.
void assignColours(Graph *graph, int nodeCount,
                   int *device_colours, bool *device_conflicts, int maxDegree)
{
    // Launch assignColoursKernel with nodeCount number of threads
    assignColoursKernel<<<CEIL(nodeCount, MAX_THREAD_COUNT), MAX_THREAD_COUNT>>>(
        graph, nodeCount, device_colours, device_conflicts, maxDegree);
    // FIX: the original discarded launch/execution errors entirely.
    catchCudaError(cudaGetLastError(), "assignColoursKernel launch");
    catchCudaError(cudaDeviceSynchronize(), "assignColoursKernel sync");
}

/**
 * One thread per node: a node is in conflict when a lower-numbered neighbour
 * holds the same colour (tie broken by index so only one endpoint re-colours).
 * Sets *device_conflictExists when any conflict is found.
 */
__global__ void detectConflictsKernel(Graph *graph, int nodeCount,
                                      int *device_colours, bool *device_conflicts, bool *device_conflictExists)
{
    int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= nodeCount)
        return;

    device_conflicts[node] = false;
    for (int i = graph->adjacencyListPointers[node]; i < graph->adjacencyListPointers[node + 1]; i++)
    {
        int neighbour = graph->adjacencyList[i];
        if (device_colours[neighbour] == device_colours[node] && neighbour < node)
        {
            // conflict; racy write of 'true' by multiple threads is benign
            device_conflicts[node] = true;
            *device_conflictExists = true;
        }
    }
}

// Host wrapper: refresh the per-node conflict flags and report whether any
// conflict remains (i.e. whether another colouring pass is needed).
bool detectConflicts(Graph *graph, int nodeCount,
                     int *device_colours, bool *device_conflicts)
{
    bool *device_conflictExists;
    bool conflictExists = false;
    catchCudaError(cudaMalloc((void **)&device_conflictExists, sizeof(bool)), "Malloc1");
    catchCudaError(cudaMemcpy(device_conflictExists, &conflictExists, sizeof(bool), cudaMemcpyHostToDevice), "Memcpy7");

    // Launch detectConflictsKernel with nodeCount number of threads
    detectConflictsKernel<<<CEIL(nodeCount, MAX_THREAD_COUNT), MAX_THREAD_COUNT>>>(
        graph, nodeCount, device_colours, device_conflicts, device_conflictExists);
    // FIX: the original discarded launch/execution errors entirely.
    catchCudaError(cudaGetLastError(), "detectConflictsKernel launch");
    catchCudaError(cudaDeviceSynchronize(), "detectConflictsKernel sync");

    // Copy device_conflictExists to conflictExists and return
    catchCudaError(cudaMemcpy(&conflictExists, device_conflictExists, sizeof(bool), cudaMemcpyDeviceToHost), "Memcpy6");
    // Free device memory
    catchCudaError(cudaFree(device_conflictExists), "Free");
    return conflictExists;
}

/**
 * Iteratively colour the graph on the device until conflict-free.
 * Returns a heap-allocated array of nodeCount colours (caller frees with
 * delete[]); also records the device time in the global device_time_taken.
 */
int *graphColouring(Graph *graph, int nodeCount, int maxDegree)
{
    // Boolean array for conflicts
    bool *host_conflicts = new bool[nodeCount];
    int *host_colours = new int[nodeCount];
    int *device_colours;
    bool *device_conflicts;

    // Initialize all nodes to invalid colour (0)
    memset(host_colours, 0, sizeof(int) * nodeCount);
    // Initialize all nodes into conflict
    memset(host_conflicts, true, sizeof(bool) * nodeCount);

    catchCudaError(cudaMalloc((void **)&device_colours, sizeof(int) * nodeCount), "Malloc2");
    catchCudaError(cudaMemcpy(device_colours, host_colours, sizeof(int) * nodeCount, cudaMemcpyHostToDevice), "Memcpy1");
    catchCudaError(cudaMalloc((void **)&device_conflicts, sizeof(bool) * nodeCount), "Malloc3");
    catchCudaError(cudaMemcpy(device_conflicts, host_conflicts, sizeof(bool) * nodeCount, cudaMemcpyHostToDevice), "Memcpy2");

    // Timer
    cudaEvent_t device_start, device_end;
    catchCudaError(cudaEventCreate(&device_start), "Event Create");
    catchCudaError(cudaEventCreate(&device_end), "Event Create");
    catchCudaError(cudaEventRecord(device_start), "Event Record");

    // Repeat speculative colouring until no adjacent pair shares a colour.
    do
    {
        assignColours(graph, nodeCount, device_colours, device_conflicts, maxDegree);
    } while (detectConflicts(graph, nodeCount, device_colours, device_conflicts));

    // Timer
    catchCudaError(cudaEventRecord(device_end), "Event Record");
    catchCudaError(cudaEventSynchronize(device_end), "Event Synchronize");
    catchCudaError(cudaEventElapsedTime(&device_time_taken, device_start, device_end), "Elapsed time");
    // FIX: the original leaked both timing events.
    catchCudaError(cudaEventDestroy(device_start), "Event Destroy");
    catchCudaError(cudaEventDestroy(device_end), "Event Destroy");

    // Copy colours to host and return
    catchCudaError(cudaMemcpy(host_colours, device_colours, sizeof(int) * nodeCount, cudaMemcpyDeviceToHost), "Memcpy3");

    delete[] host_conflicts;
    catchCudaError(cudaFree(device_colours), "Free");
    catchCudaError(cudaFree(device_conflicts), "Free");

    return host_colours;
}

int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n";
        return 0;
    }

    char choice;
    cout << "Would you like to print the colouring of the graph? (y/n) ";
    cin >> choice;

    freopen(argv[1], "r", stdin);

    Graph *host_graph = new Graph();
    Graph *device_graph;
    catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph)), "Malloc4");
    host_graph->readGraph();

    int nodeCount = host_graph->getNodeCount();
    int edgeCount = host_graph->getEdgeCount();
    int maxDegree = host_graph->getMaxDegree();
    // Shallow copy of the Graph object; its pointer members are patched below.
    catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice), "Memcpy4");

    // Copy Adjancency List to device
    int *adjacencyList;
    // Alocate device memory and copy
    catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)), "Malloc5");
    catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice), "Memcpy");
    // Update the pointer to this, in device_graph
    catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice), "Memcpy5");

    // Copy Adjancency List Pointers to device
    int *adjacencyListPointers;
    // Alocate device memory and copy
    catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)), "Malloc6");
    catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice), "Memcpy");
    // Update the pointer to this, in device_graph
    catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice), "Memcpy");

    int *colouring = graphColouring(device_graph, nodeCount, maxDegree);

    int chromaticNumber = INT_MIN;
    for (int i = 0; i < nodeCount; i++)
    {
        chromaticNumber = max(chromaticNumber, colouring[i]);
        if (choice == 'y' || choice == 'Y')
            printf("Node %d => Colour %d\n", i, colouring[i]);
    }
    cout << endl;
    printf("\nNumber of colours used (chromatic number) ==> %d\n", chromaticNumber);
    printf("Time Taken (Parallel) = %f ms\n", device_time_taken);

    if (argc == 3)
    {
        freopen(argv[2], "w", stdout);
        for (int i = 0; i < nodeCount; i++)
            cout << colouring[i] << " ";
        cout << endl;
    }

    // Free all memory
    delete[] colouring;
    // FIX: the original leaked the host-side Graph object.
    delete host_graph;
    catchCudaError(cudaFree(adjacencyList), "Free");
    catchCudaError(cudaFree(adjacencyListPointers), "Free");
    catchCudaError(cudaFree(device_graph), "Free");
}
f571b95cd96938f7711619e5e26125d69230674b.hip
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
 * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * See LICENSE.txt for license information
 ************************************************************************/

#include "hip/hip_runtime.h"
#include "common.h"

// Print the two-row column header of the result table: one group of
// time/bandwidth/error columns for out-of-place runs and one for in-place.
// NOTE(review): PRINT / CUDACHECK / TESTCHECK / NCCLCHECK are macros from
// common.h — presumably rank-0-only printing and error-check wrappers;
// confirm against the test harness.
void print_header() {
  PRINT("# %10s %12s %8s %6s out-of-place in-place \n", "", "", "", "");
  PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "root", "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
  PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "", "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}

// Print the row prefix (size, element count, type name, root rank).
// opName is unused: broadcast has no reduction operator.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
  PRINT("%12li %12li %8s %6i", size, count, typeName, root);
}

// For broadcast, send and receive counts both equal the element count and
// the in-place variant uses the same buffer with no offset.
void BroadcastGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
  *sendcount = count;
  *recvcount = count;
  *sendInplaceOffset = 0;
  *recvInplaceOffset = 0;
  *paramcount = *sendcount;
}

// Initialise buffers for one measurement: zero the receive buffers, fill the
// source buffer on the root rank only, and fill every rank's expected-result
// buffer with the root's pattern (what the broadcast must deliver).
testResult_t BroadcastInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
  size_t sendcount = args->sendBytes / wordSize(type);
  size_t recvcount = args->expectedBytes / wordSize(type);

  for (int i=0; i<args->nGpus; i++) {
    // Global GPU id for this (local rank, thread, local GPU index) triple.
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(hipSetDevice(gpuid));
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
    // In-place broadcast reads and writes the same (receive) buffer.
    void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
    if (rank == root) TESTCHECK(InitData(data, sendcount, type, rep, rank));
    TESTCHECK(InitData(args->expected[i], recvcount, type, rep, root));
    CUDACHECK(hipDeviceSynchronize());
  }
  return testSuccess;
}

// Broadcast moves count*typesize bytes once per rank, so algorithm bandwidth
// equals bus bandwidth (factor 1).
void BroadcastGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
  double baseBw = (double)(count * typesize) / 1.0E9 / sec;
  *algBw = baseBw;
  double factor = 1;
  *busBw = baseBw * factor;
}

// Issue the broadcast on the given stream. NCCL >= 2.2 provides ncclBroadcast
// with separate send/recv buffers; older NCCL only has the in-place ncclBcast,
// so the root passes its send buffer and all other ranks their recv buffer.
testResult_t BroadcastRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
  int rank;
  NCCLCHECK(ncclCommUserRank(comm, &rank));
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
  NCCLCHECK(ncclBroadcast(sendbuff, recvbuff, count, type, root, comm, stream));
#else
  if (rank == root) {
    NCCLCHECK(ncclBcast(sendbuff, count, type, root, comm, stream));
  } else {
    NCCLCHECK(ncclBcast(recvbuff, count, type, root, comm, stream));
  }
#endif
  return testSuccess;
}

// Collective description table consumed by the common test driver.
struct testColl broadcastTest = {
  "Broadcast",
  BroadcastGetCollByteCount,
  BroadcastInitData,
  BroadcastGetBw,
  BroadcastRunColl
};

// Report send/recv buffer sizes for a given element count; the offset and
// param outputs of BroadcastGetCollByteCount are discarded here.
void BroadcastGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
  size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  BroadcastGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}

// Run the timed test over the requested data type(s) and root(s).
// type == -1 means "all supported types"; root == -1 means "every rank in
// turn is the root".
testResult_t BroadcastRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
  args->collTest = &broadcastTest;
  ncclDataType_t *run_types;
  const char **run_typenames;
  int type_count;
  int begin_root, end_root;

  if ((int)type != -1) {
    type_count = 1;
    run_types = &type;
    run_typenames = &typeName;
  } else {
    type_count = test_typenum;
    run_types = test_types;
    run_typenames = test_typenames;
  }

  if (root != -1) {
    begin_root = end_root = root;
  } else {
    begin_root = 0;
    end_root = args->nProcs*args->nThreads*args->nGpus-1;
  }

  for (int i=0; i<type_count; i++) {
    for (int j=begin_root; j<=end_root; j++) {
      // Broadcast has no reduction op: pass a dummy op and an empty op name.
      TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", j));
    }
  }
  return testSuccess;
}

// Engine entry points; picked up by the common main through the weak symbol.
struct testEngine broadcastEngine = {
  BroadcastGetBuffSize,
  BroadcastRunTest
};

#pragma weak ncclTestEngine=broadcastEngine
f571b95cd96938f7711619e5e26125d69230674b.cu
/*************************************************************************
 * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * See LICENSE.txt for license information
 ************************************************************************/

#include "cuda_runtime.h"
#include "common.h"

// Print the two-row column header of the result table: one group of
// time/bandwidth/error columns for out-of-place runs and one for in-place.
// NOTE(review): PRINT / CUDACHECK / TESTCHECK / NCCLCHECK are macros from
// common.h — presumably rank-0-only printing and error-check wrappers;
// confirm against the test harness.
void print_header() {
  PRINT("# %10s %12s %8s %6s out-of-place in-place \n", "", "", "", "");
  PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "root", "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
  PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "", "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}

// Print the row prefix (size, element count, type name, root rank).
// opName is unused: broadcast has no reduction operator.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
  PRINT("%12li %12li %8s %6i", size, count, typeName, root);
}

// For broadcast, send and receive counts both equal the element count and
// the in-place variant uses the same buffer with no offset.
void BroadcastGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
  *sendcount = count;
  *recvcount = count;
  *sendInplaceOffset = 0;
  *recvInplaceOffset = 0;
  *paramcount = *sendcount;
}

// Initialise buffers for one measurement: zero the receive buffers, fill the
// source buffer on the root rank only, and fill every rank's expected-result
// buffer with the root's pattern (what the broadcast must deliver).
testResult_t BroadcastInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
  size_t sendcount = args->sendBytes / wordSize(type);
  size_t recvcount = args->expectedBytes / wordSize(type);

  for (int i=0; i<args->nGpus; i++) {
    // Global GPU id for this (local rank, thread, local GPU index) triple.
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(cudaSetDevice(gpuid));
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
    // In-place broadcast reads and writes the same (receive) buffer.
    void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
    if (rank == root) TESTCHECK(InitData(data, sendcount, type, rep, rank));
    TESTCHECK(InitData(args->expected[i], recvcount, type, rep, root));
    CUDACHECK(cudaDeviceSynchronize());
  }
  return testSuccess;
}

// Broadcast moves count*typesize bytes once per rank, so algorithm bandwidth
// equals bus bandwidth (factor 1).
void BroadcastGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
  double baseBw = (double)(count * typesize) / 1.0E9 / sec;
  *algBw = baseBw;
  double factor = 1;
  *busBw = baseBw * factor;
}

// Issue the broadcast on the given stream. NCCL >= 2.2 provides ncclBroadcast
// with separate send/recv buffers; older NCCL only has the in-place ncclBcast,
// so the root passes its send buffer and all other ranks their recv buffer.
testResult_t BroadcastRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
  int rank;
  NCCLCHECK(ncclCommUserRank(comm, &rank));
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
  NCCLCHECK(ncclBroadcast(sendbuff, recvbuff, count, type, root, comm, stream));
#else
  if (rank == root) {
    NCCLCHECK(ncclBcast(sendbuff, count, type, root, comm, stream));
  } else {
    NCCLCHECK(ncclBcast(recvbuff, count, type, root, comm, stream));
  }
#endif
  return testSuccess;
}

// Collective description table consumed by the common test driver.
struct testColl broadcastTest = {
  "Broadcast",
  BroadcastGetCollByteCount,
  BroadcastInitData,
  BroadcastGetBw,
  BroadcastRunColl
};

// Report send/recv buffer sizes for a given element count; the offset and
// param outputs of BroadcastGetCollByteCount are discarded here.
void BroadcastGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
  size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  BroadcastGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}

// Run the timed test over the requested data type(s) and root(s).
// type == -1 means "all supported types"; root == -1 means "every rank in
// turn is the root".
testResult_t BroadcastRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
  args->collTest = &broadcastTest;
  ncclDataType_t *run_types;
  const char **run_typenames;
  int type_count;
  int begin_root, end_root;

  if ((int)type != -1) {
    type_count = 1;
    run_types = &type;
    run_typenames = &typeName;
  } else {
    type_count = test_typenum;
    run_types = test_types;
    run_typenames = test_typenames;
  }

  if (root != -1) {
    begin_root = end_root = root;
  } else {
    begin_root = 0;
    end_root = args->nProcs*args->nThreads*args->nGpus-1;
  }

  for (int i=0; i<type_count; i++) {
    for (int j=begin_root; j<=end_root; j++) {
      // Broadcast has no reduction op: pass a dummy op and an empty op name.
      TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", j));
    }
  }
  return testSuccess;
}

// Engine entry points; picked up by the common main through the weak symbol.
struct testEngine broadcastEngine = {
  BroadcastGetBuffSize,
  BroadcastRunTest
};

#pragma weak ncclTestEngine=broadcastEngine
2218489e2030d0b575e96d2959a8eabf36a2901e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <THH/THHDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - 
oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + 
iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = 
outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. 
Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } } // namespace TORCH_IMPL_FUNC(replication_pad1d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; constexpr int64_t planeDim = -2; constexpr int64_t dimw = -1; int numInputDims = input.ndimension(); int64_t numPlanes = input.size(planeDim); int64_t inputW = input.size(dimw); int64_t outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 2) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devOutput.size(2); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } TORCH_IMPL_FUNC(replication_pad1d_backward_out_cuda) ( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, const Tensor& gradInput ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devGradOutput.size(2); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), 
block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } }); } TORCH_IMPL_FUNC(replication_pad2d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); if (input.numel() == 0) { return; } int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; int64_t padT = paddingSize[2]; int64_t padB = paddingSize[3]; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (input.dim() == 3) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } TORCH_IMPL_FUNC(replication_pad3d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = output.size(dimd); int outputH = output.size(dimh); int outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_cuda", [&] { at::Tensor input_ = 
input; at::Tensor output_ = output; if (numInputDims == 4) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // 
at::native } // at
2218489e2030d0b575e96d2959a8eabf36a2901e.cu
#include <ATen/ATen.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <THC/THCDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; 
gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; 
gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) 
% gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAddNoReturn(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. 
Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } } // namespace TORCH_IMPL_FUNC(replication_pad1d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; constexpr int64_t planeDim = -2; constexpr int64_t dimw = -1; int numInputDims = input.ndimension(); int64_t numPlanes = input.size(planeDim); int64_t inputW = input.size(dimw); int64_t outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 2) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devOutput.size(2); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel1d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } TORCH_IMPL_FUNC(replication_pad1d_backward_out_cuda) ( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, const Tensor& gradInput ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devGradOutput.size(2); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 
blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } }); } TORCH_IMPL_FUNC(replication_pad2d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); if (input.numel() == 0) { return; } int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; int64_t padT = paddingSize[2]; int64_t padB = paddingSize[3]; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (input.dim() == 3) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel2d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, padT, padB, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } TORCH_IMPL_FUNC(replication_pad3d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = output.size(dimd); int outputH = output.size(dimh); int outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if 
(numInputDims == 4) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel3d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
3b0d170b0de55ce3e2b69285ae0e550db9c2041c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GPU_ma_rock_sample.h" #include "base_ma_rock_sample.h" #include "ma_rock_sample.h" #include <bitset> #include <despot/GPUutil/GPUrandom.h> #include <despot/solver/Hyp_despot.h> #include <despot/GPUinterface/GPUupper_bound.h> using namespace std; namespace despot { /* ============================================================================= * Dvc_MultiAgentRockSample class * =============================================================================*/ extern __shared__ int localParticles[]; __global__ void step_global( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), vnode_particles, PID % num_particles, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, parent_action, reward[global_list_pos], obs[global_list_pos]); if (blockIdx.y * blockDim.x + threadIdx.x < num_particles) { /*Record stepped particles from parent as particles in this node*/ if (threadIdx.y == 0 && action==0) { Dvc_State* temp = DvcModelGet_(vnode_particles, PID % num_particles); DvcModelCopyNoAlloc_(temp, current_particle,0, false); } } term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, action, reward[global_list_pos], obs[global_list_pos]); Dvc_History history; Dvc_RandomStreams streams; ub[global_list_pos]=DvcUpperBoundValue_(current_particle, 0, history); lb[global_list_pos]=DvcLowerBoundValue_(current_particle,streams,history, 0) ; Dvc_State* 
temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 0, false); } } __global__ void step_global_1( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), vnode_particles, PID % num_particles, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, parent_action, reward[global_list_pos], obs[global_list_pos]); Dvc_State* temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 0, false); } } __global__ void step_global_2( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), state, global_list_pos, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); if (blockIdx.y * blockDim.x + threadIdx.x < num_particles) { /*Record stepped particles from parent as particles in this node*/ if (threadIdx.y == 0 && action==0) { Dvc_State* temp = DvcModelGet_(vnode_particles, PID % num_particles); DvcModelCopyNoAlloc_(temp, current_particle,0, false); } } __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, action, 
reward[global_list_pos], obs[global_list_pos]); Dvc_History history; Dvc_RandomStreams streams; ub[global_list_pos]=DvcUpperBoundValue_(current_particle, 0, history); lb[global_list_pos]=DvcLowerBoundValue_(current_particle,streams,history, 0) ; Dvc_State* temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 0, false); } } DEVICE bool Dvc_MultiAgentRockSample::Dvc_Step(Dvc_State& state, float rand_num, int action, float& reward, OBS_TYPE& obs) { reward=0; obs=0; bool terminal=true; Dvc_MARockSampleState& rockstate = static_cast<Dvc_MARockSampleState&>(state); __syncthreads(); unsigned long long int Temp=INIT_QUICKRANDSEED; for(int rid=0;rid<num_agents_;rid++) { SetRobObs(obs, E_NONE, rid); if(GetRobPosIndex(&rockstate, rid)!=ROB_TERMINAL_ID){ int rob_act=GetRobAction(action, rid); //rob_act=Dvc_Compass::EAST;//debugging if (rob_act < E_SAMPLE) { // Move switch (rob_act) { case Dvc_Compass::EAST: if (GetX(&rockstate, rid) + 1 < ma_map_size_) { IncX(&rockstate, rid); } else { reward+= +10; SetRobPosIndex(rockstate.joint_pos, rid, ROB_TERMINAL_ID); } break; case Dvc_Compass::NORTH: if (GetY(&rockstate, rid) + 1 < ma_map_size_) IncY(&rockstate, rid); else{ reward += -100; } break; case Dvc_Compass::SOUTH: if (GetY(&rockstate, rid) - 1 >= 0) DecY(&rockstate, rid); else reward += -100; break; case Dvc_Compass::WEST: if (GetX(&rockstate, rid) - 1 >= 0) DecX(&rockstate, rid); else reward += -100; break; } } if (rob_act == E_SAMPLE) { // Sample int rock = ma_grid_[GetRobPosIndex(&rockstate, rid)]; if (rock >= 0) { if (GetRock(&rockstate, rock)) reward += +10; else reward += -10; SampleRock(&rockstate, rock); } else { reward += -100; } } if (rob_act > E_SAMPLE) { // Sense int rob_obs = 0; int rock = (rob_act - E_SAMPLE - 1) % ma_num_rocks_; float distance = DvcCoord::EuclideanDistance(GetRobPos(&rockstate, rid), ma_rock_pos_[rock]); int action_type = (rob_act - E_SAMPLE - 1)/ma_num_rocks_; reward = action_type*(-0.01); double 
half_efficiency_distance = action_type > 0 ? ma_half_efficiency_distance_2_ : ma_half_efficiency_distance_; float efficiency = (1 + pow(2, -distance / half_efficiency_distance)) * 0.5; //float efficiency = (1 + powf(2, -distance / ma_half_efficiency_distance_)) // * 0.5; if(use_continuous_observation) { bool good_rock = GetRock(&rockstate, rock); if(efficiency > (1-continuous_observation_interval)) { efficiency = (1-continuous_observation_interval); } float prob_bucket_double = rand_num * continuous_observation_scale; int prob_bucket = (int)prob_bucket_double; float remaining_prob = prob_bucket_double - prob_bucket; float prob_good = efficiency + (continuous_observation_interval*(float)prob_bucket / (float)continuous_observation_scale); if(remaining_prob > prob_good) { prob_good = 1-prob_good; } if(!good_rock & E_GOOD) { prob_good = 1-prob_good; } //double real_obs = (random_num*(upper_limit-lower_limit)) + lower_limit; rob_obs = int(prob_good*continuous_observation_scale/continuous_observation_interval); SetRobObs(obs, rob_obs, rid); } else { for(int j = 0; j < num_obs_bits; j++) {int temp_rob_obs; if (rand_num < efficiency) temp_rob_obs= GetRock(&rockstate, rock) & E_GOOD; else temp_rob_obs= !(GetRock(&rockstate, rock) & E_GOOD); rob_obs = (2*rob_obs + temp_rob_obs); rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num); } rob_obs = 4*rob_obs; SetRobObs(obs, rob_obs, rid); //if (rand_num < efficiency) // SetRobObs(obs, GetRock(&rockstate, rock) & E_GOOD, rid); //else // SetRobObs(obs, !(GetRock(&rockstate, rock) & E_GOOD), rid); } } if (GetRobPosIndex(&rockstate, rid)!=ROB_TERMINAL_ID) { terminal=false; } } } if(GPUDoPrint/* && action==blockIdx.x*/) printf("(GPU_step) action %d scenario %d state_id %d joint_pos %d blockid.y %d threadid.x %d rand %f\n", action, rockstate.scenario_id, rockstate.state_id, rockstate.joint_pos, blockIdx.y, threadIdx.x, rand_num); return terminal; } DEVICE float Dvc_MultiAgentRockSample::Dvc_ObsProb(OBS_TYPE& obs, Dvc_State& state, 
int action) { float prob=1; //calculate prob for each robot, multiply them together for(int i=0;i<num_agents_;i++){ int agent_action=GetRobAction(action, i); int rob_obs= GetRobObs(obs, i); const Dvc_MARockSampleState& rockstate = static_cast<const Dvc_MARockSampleState&>(state); if(GetRobPosIndex(&rockstate, i)!=ROB_TERMINAL_ID){ if (agent_action <= E_SAMPLE) prob *= (rob_obs == E_NONE); //else if (rob_obs < 4) //Last 2 bits for E_NONE // prob *=0; else{ //int rock = agent_action - E_SAMPLE - 1; int rock = (agent_action - E_SAMPLE - 1) % ma_num_rocks_; float distance = DvcCoord::EuclideanDistance(GetRobPos(&rockstate, i), ma_rock_pos_[rock]); float efficiency = (1 + pow(2, -distance / ma_half_efficiency_distance_)) * 0.5; int true_state = (GetRock(&rockstate, rock) & 1); if(use_continuous_observation) { float obs_prob = (continuous_observation_interval*rob_obs)/(continuous_observation_scale); prob *= (true_state == E_BAD ? (1-obs_prob):obs_prob); } else { for(int j = 0; j < num_obs_bits; j++) { int my_rob_obs = (rob_obs >> (2+j)) & 1; prob*= ( true_state== my_rob_obs) ? 
efficiency : (1 - efficiency); if(j % 8 == 0) { prob = prob*1000; //Multiply by a constant to avoid prob becoming 0 } } } } } } return prob; } DEVICE int Dvc_MultiAgentRockSample::NumActions() { return pow((float)((num_action_types*ma_num_rocks_) + 5), num_agents_); } DEVICE int Dvc_MultiAgentRockSample::Dvc_NumObservations() { return /*3*/num_agents_*(1 + (1 << num_obs_bits)); } DEVICE Dvc_State* Dvc_MultiAgentRockSample::Dvc_Get(Dvc_State* particles, int pos) { Dvc_MARockSampleState* particle_i= static_cast<Dvc_MARockSampleState*>(particles)+pos; return particle_i; } DEVICE void Dvc_MultiAgentRockSample::Dvc_Copy_NoAlloc(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) { /*Pass member values, assign member pointers to existing state pointer*/ const Dvc_MARockSampleState* src_i= static_cast<const Dvc_MARockSampleState*>(src)+pos; if(!offset_des) pos=0; Dvc_MARockSampleState* des_i= static_cast<const Dvc_MARockSampleState*>(des)+pos; des_i->weight = src_i->weight; des_i->scenario_id = src_i->scenario_id; des_i->state_id = src_i->state_id; des_i->joint_pos = src_i->joint_pos; //des_i->allocated_=true; } } // namespace despot
3b0d170b0de55ce3e2b69285ae0e550db9c2041c.cu
#include "GPU_ma_rock_sample.h" #include "base_ma_rock_sample.h" #include "ma_rock_sample.h" #include <bitset> #include <despot/GPUutil/GPUrandom.h> #include <despot/solver/Hyp_despot.h> #include <despot/GPUinterface/GPUupper_bound.h> using namespace std; namespace despot { /* ============================================================================= * Dvc_MultiAgentRockSample class * =============================================================================*/ extern __shared__ int localParticles[]; __global__ void step_global( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), vnode_particles, PID % num_particles, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, parent_action, reward[global_list_pos], obs[global_list_pos]); if (blockIdx.y * blockDim.x + threadIdx.x < num_particles) { /*Record stepped particles from parent as particles in this node*/ if (threadIdx.y == 0 && action==0) { Dvc_State* temp = DvcModelGet_(vnode_particles, PID % num_particles); DvcModelCopyNoAlloc_(temp, current_particle,0, false); } } term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, action, reward[global_list_pos], obs[global_list_pos]); Dvc_History history; Dvc_RandomStreams streams; ub[global_list_pos]=DvcUpperBoundValue_(current_particle, 0, history); lb[global_list_pos]=DvcLowerBoundValue_(current_particle,streams,history, 0) ; Dvc_State* temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 
0, false); } } __global__ void step_global_1( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), vnode_particles, PID % num_particles, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, parent_action, reward[global_list_pos], obs[global_list_pos]); Dvc_State* temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 0, false); } } __global__ void step_global_2( Dvc_State* vnode_particles,float* rand, float * reward, OBS_TYPE* obs, float* ub, Dvc_ValuedAction* lb, bool* term, int num_particles, int parent_action, Dvc_State* state) { int action=blockIdx.x; int PID = (blockIdx.y * blockDim.x + threadIdx.x) ; if(PID<num_particles){ int global_list_pos = action * num_particles + PID; float rand_num=rand[global_list_pos]; if (threadIdx.y == 0) { DvcModelCopyToShared_( (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x), state, global_list_pos, false); } Dvc_State* current_particle = (Dvc_State*) ((int*) localParticles + 8 * threadIdx.x); if (blockIdx.y * blockDim.x + threadIdx.x < num_particles) { /*Record stepped particles from parent as particles in this node*/ if (threadIdx.y == 0 && action==0) { Dvc_State* temp = DvcModelGet_(vnode_particles, PID % num_particles); DvcModelCopyNoAlloc_(temp, current_particle,0, false); } } __syncthreads(); term[global_list_pos]=DvcModelStep_(*current_particle, rand_num, action, reward[global_list_pos], obs[global_list_pos]); Dvc_History history; Dvc_RandomStreams 
streams; ub[global_list_pos]=DvcUpperBoundValue_(current_particle, 0, history); lb[global_list_pos]=DvcLowerBoundValue_(current_particle,streams,history, 0) ; Dvc_State* temp = DvcModelGet_(state, global_list_pos); DvcModelCopyNoAlloc_(temp, current_particle, 0, false); } } DEVICE bool Dvc_MultiAgentRockSample::Dvc_Step(Dvc_State& state, float rand_num, int action, float& reward, OBS_TYPE& obs) { reward=0; obs=0; bool terminal=true; Dvc_MARockSampleState& rockstate = static_cast<Dvc_MARockSampleState&>(state); __syncthreads(); unsigned long long int Temp=INIT_QUICKRANDSEED; for(int rid=0;rid<num_agents_;rid++) { SetRobObs(obs, E_NONE, rid); if(GetRobPosIndex(&rockstate, rid)!=ROB_TERMINAL_ID){ int rob_act=GetRobAction(action, rid); //rob_act=Dvc_Compass::EAST;//debugging if (rob_act < E_SAMPLE) { // Move switch (rob_act) { case Dvc_Compass::EAST: if (GetX(&rockstate, rid) + 1 < ma_map_size_) { IncX(&rockstate, rid); } else { reward+= +10; SetRobPosIndex(rockstate.joint_pos, rid, ROB_TERMINAL_ID); } break; case Dvc_Compass::NORTH: if (GetY(&rockstate, rid) + 1 < ma_map_size_) IncY(&rockstate, rid); else{ reward += -100; } break; case Dvc_Compass::SOUTH: if (GetY(&rockstate, rid) - 1 >= 0) DecY(&rockstate, rid); else reward += -100; break; case Dvc_Compass::WEST: if (GetX(&rockstate, rid) - 1 >= 0) DecX(&rockstate, rid); else reward += -100; break; } } if (rob_act == E_SAMPLE) { // Sample int rock = ma_grid_[GetRobPosIndex(&rockstate, rid)]; if (rock >= 0) { if (GetRock(&rockstate, rock)) reward += +10; else reward += -10; SampleRock(&rockstate, rock); } else { reward += -100; } } if (rob_act > E_SAMPLE) { // Sense int rob_obs = 0; int rock = (rob_act - E_SAMPLE - 1) % ma_num_rocks_; float distance = DvcCoord::EuclideanDistance(GetRobPos(&rockstate, rid), ma_rock_pos_[rock]); int action_type = (rob_act - E_SAMPLE - 1)/ma_num_rocks_; reward = action_type*(-0.01); double half_efficiency_distance = action_type > 0 ? 
ma_half_efficiency_distance_2_ : ma_half_efficiency_distance_; float efficiency = (1 + pow(2, -distance / half_efficiency_distance)) * 0.5; //float efficiency = (1 + powf(2, -distance / ma_half_efficiency_distance_)) // * 0.5; if(use_continuous_observation) { bool good_rock = GetRock(&rockstate, rock); if(efficiency > (1-continuous_observation_interval)) { efficiency = (1-continuous_observation_interval); } float prob_bucket_double = rand_num * continuous_observation_scale; int prob_bucket = (int)prob_bucket_double; float remaining_prob = prob_bucket_double - prob_bucket; float prob_good = efficiency + (continuous_observation_interval*(float)prob_bucket / (float)continuous_observation_scale); if(remaining_prob > prob_good) { prob_good = 1-prob_good; } if(!good_rock & E_GOOD) { prob_good = 1-prob_good; } //double real_obs = (random_num*(upper_limit-lower_limit)) + lower_limit; rob_obs = int(prob_good*continuous_observation_scale/continuous_observation_interval); SetRobObs(obs, rob_obs, rid); } else { for(int j = 0; j < num_obs_bits; j++) {int temp_rob_obs; if (rand_num < efficiency) temp_rob_obs= GetRock(&rockstate, rock) & E_GOOD; else temp_rob_obs= !(GetRock(&rockstate, rock) & E_GOOD); rob_obs = (2*rob_obs + temp_rob_obs); rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num); } rob_obs = 4*rob_obs; SetRobObs(obs, rob_obs, rid); //if (rand_num < efficiency) // SetRobObs(obs, GetRock(&rockstate, rock) & E_GOOD, rid); //else // SetRobObs(obs, !(GetRock(&rockstate, rock) & E_GOOD), rid); } } if (GetRobPosIndex(&rockstate, rid)!=ROB_TERMINAL_ID) { terminal=false; } } } if(GPUDoPrint/* && action==blockIdx.x*/) printf("(GPU_step) action %d scenario %d state_id %d joint_pos %d blockid.y %d threadid.x %d rand %f\n", action, rockstate.scenario_id, rockstate.state_id, rockstate.joint_pos, blockIdx.y, threadIdx.x, rand_num); return terminal; } DEVICE float Dvc_MultiAgentRockSample::Dvc_ObsProb(OBS_TYPE& obs, Dvc_State& state, int action) { float prob=1; //calculate prob 
for each robot, multiply them together for(int i=0;i<num_agents_;i++){ int agent_action=GetRobAction(action, i); int rob_obs= GetRobObs(obs, i); const Dvc_MARockSampleState& rockstate = static_cast<const Dvc_MARockSampleState&>(state); if(GetRobPosIndex(&rockstate, i)!=ROB_TERMINAL_ID){ if (agent_action <= E_SAMPLE) prob *= (rob_obs == E_NONE); //else if (rob_obs < 4) //Last 2 bits for E_NONE // prob *=0; else{ //int rock = agent_action - E_SAMPLE - 1; int rock = (agent_action - E_SAMPLE - 1) % ma_num_rocks_; float distance = DvcCoord::EuclideanDistance(GetRobPos(&rockstate, i), ma_rock_pos_[rock]); float efficiency = (1 + pow(2, -distance / ma_half_efficiency_distance_)) * 0.5; int true_state = (GetRock(&rockstate, rock) & 1); if(use_continuous_observation) { float obs_prob = (continuous_observation_interval*rob_obs)/(continuous_observation_scale); prob *= (true_state == E_BAD ? (1-obs_prob):obs_prob); } else { for(int j = 0; j < num_obs_bits; j++) { int my_rob_obs = (rob_obs >> (2+j)) & 1; prob*= ( true_state== my_rob_obs) ? 
efficiency : (1 - efficiency); if(j % 8 == 0) { prob = prob*1000; //Multiply by a constant to avoid prob becoming 0 } } } } } } return prob; } DEVICE int Dvc_MultiAgentRockSample::NumActions() { return pow((float)((num_action_types*ma_num_rocks_) + 5), num_agents_); } DEVICE int Dvc_MultiAgentRockSample::Dvc_NumObservations() { return /*3*/num_agents_*(1 + (1 << num_obs_bits)); } DEVICE Dvc_State* Dvc_MultiAgentRockSample::Dvc_Get(Dvc_State* particles, int pos) { Dvc_MARockSampleState* particle_i= static_cast<Dvc_MARockSampleState*>(particles)+pos; return particle_i; } DEVICE void Dvc_MultiAgentRockSample::Dvc_Copy_NoAlloc(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) { /*Pass member values, assign member pointers to existing state pointer*/ const Dvc_MARockSampleState* src_i= static_cast<const Dvc_MARockSampleState*>(src)+pos; if(!offset_des) pos=0; Dvc_MARockSampleState* des_i= static_cast<const Dvc_MARockSampleState*>(des)+pos; des_i->weight = src_i->weight; des_i->scenario_id = src_i->scenario_id; des_i->state_id = src_i->state_id; des_i->joint_pos = src_i->joint_pos; //des_i->allocated_=true; } } // namespace despot
6078b9adf9a875d58371840e34585bd53b9027e9.hip
// !!! This is a file automatically generated by hipify!!! #include "cg.hh" #include "matrix.hh" #include <algorithm> #include <cblas.h> #include <cmath> #include <iostream> #include <hip/hip_runtime.h> #define SHARED 4096 const double NEARZERO = 1.0e-14; const bool DEBUG = false; /* cgsolver solves the linear equation A*x = b where A is of size m x n Code based on MATLAB code (from wikipedia ;-) ): function x = conjgrad(A, b, x) r = b - A * x; p = r; rsold = r' * r; for i = 1:length(b) Ap = A * p; alpha = rsold / (p' * Ap); x = x + alpha * p; r = r - alpha * Ap; rsnew = r' * r; if sqrt(rsnew) < 1e-10 break; end p = r + (rsnew / rsold) * p; rsold = rsnew; end end */ ////////////////////////////////////////////////////////////////////////////////////////////// /* I implemented 4 kernels: -dgemv: makes matrix multiplication y = alpha*A*x with 1 thread per cross-product, puts SHARED values of x in shared memory -dgemv2: makes matrix multiplication y = alpha*A*x with n_reduce threads per cross-product, each value is stores in y before reduction with reduce kernel puts width values of x in shared memory -reduce: reduces the value of y to finish the computations of dgemv2 -daxpy: performs y = alpha*x + beta*y */ ////////////////////////////////////////////////////////////////////////////////////////////// __global__ void dgemv(int m, int n, double alpha, Matrix A, double * x, double * y) { /// Computes y = alpha*A*x int row = blockDim.x * blockIdx.x + threadIdx.x; // Instantiate an array in shared memory too store x __shared__ double x_shar[SHARED+1023]; // +1023 to prevent bank conflicts (I think that's how it works) // Each thread in a block will put some data of x into shared memory // First look at columns to be put in x_shar by this thread int chunk; if (blockDim.x > SHARED) {chunk = 1;} else {chunk = SHARED/blockDim.x + (SHARED % blockDim.x == 0 ? 
0 : 1);}; int first_col = chunk*threadIdx.x; int last_col = first_col + chunk; if (last_col > SHARED) {last_col=SHARED;}; // Put values of x in shared memory for (int col = first_col; col < last_col; col++) { x_shar[col] = x[col]; } __syncthreads(); // Make sure it is done for all threads*/ // stores partial cross-products in y, taking x in global memory only if needed double sum = 0; if (row < n) { for (int j = 0; j < m; j++) { if (j < SHARED) { sum += alpha* A(j,row) * x_shar[j]; } else { sum += alpha* A(j,row) * x[j]; // A is spd, we access it col-wise for coalesced memory } } y[row] = sum; } } __global__ void dgemv2(int m, int n, double alpha, Matrix A, double * x, double * y, int width) { /// Computes truncated cross-products alpha * A(i,j) * x(i), the final summation is done elsewhere // Get the indices of x to make cross_product int row = blockDim.x * blockIdx.x + threadIdx.x; int start_col = blockIdx.y * width; int end_col = start_col + width; if (end_col > n) {end_col = n;}; // Instantiate an array in shared memory too store x extern __shared__ double x_shar[]; // Each thread in a block will put some data of x into shared memory // First look at columns to be put in x_shar by this thread int chunk; if (blockDim.x > width) {chunk = 1;} else {chunk = width/blockDim.x + (width%blockDim.x==0 ? 
0 : 1);}; int first_col = chunk*threadIdx.x + start_col; int last_col = first_col + chunk; if (last_col > end_col) {last_col=end_col;}; // Put values of x in shared memory for (int col = first_col; col < last_col; col++) { x_shar[col-start_col] = x[col]; } __syncthreads(); // Make sure it is done for all threads // Compute the partial cross-products and store them in y if (row < m) { double sum = 0; for (int col = start_col; col < end_col; col++) { sum += alpha*A(col,row)*x_shar[col-start_col]; // A is symmetric } y[blockIdx.y*n + row] = sum; } } __global__ void reduce(int n, int n_reduce, double * y) { /// Makes the summation to achieve y = alpha*A*x int row = blockDim.x * blockIdx.x + threadIdx.x; // if row sums up the partial cross-products in a row, and save it in the first row of y if (row < n) { double sum = 0; for (int j = 0; j < n_reduce; j++) { sum += y[j*n + row]; } y[row] = sum; } } __global__ void daxpy(int n, double alpha, double * x, double beta, double * y) { // Computes y = alpha * x + beta * y int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) { double tmp = beta * y[i]; y[i] = alpha * x[i] + tmp; } } void CGSolver::solve(double * x, int block_size, int width) { // Compute the grid sizes int grid_size = m_m / block_size + (m_m % block_size == 0 ? 0 : 1); //grid_size along rows int n_reduce = m_n / width + (m_n % width == 0 ? 
0 : 1); // grid_size along y (# of partial cross-products) dim3 grid = dim3(grid_size, n_reduce); size_t sm = (width+block_size-1)*sizeof(double); //size of shared memory // (+block_size-1 to prevent, I think it works) // Allocate memory in host and device double * r; double * p; double * Ap; double * tmp; hipMallocManaged(&r, m_n*sizeof(double)); hipMallocManaged(&p, m_n*sizeof(double)); // If using dgemv Ap only needs m_n floating points if (width == m_n){ hipMallocManaged(&Ap, m_n*sizeof(double)); } else { hipMallocManaged(&Ap, m_n*n_reduce*sizeof(double)); } hipMallocManaged(&tmp, m_n*sizeof(double)); // r = b - A * x; if (width == m_n) { hipLaunchKernelGGL(( dgemv), dim3(grid_size), dim3(block_size), 0, 0, m_m, m_n, 1., m_A, x, Ap); } else { hipLaunchKernelGGL(( dgemv2), dim3(grid), dim3(block_size), sm, 0, m_m, m_n, 1., m_A, x, Ap, width); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce), dim3(grid_size), dim3(block_size), 0, 0, m_n, n_reduce, Ap); } hipDeviceSynchronize(); //r = m_b; //for (int i = 0; i < m_n; i++) {r[i] = m_b[i];}; hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, 1., m_b, 0., r); hipDeviceSynchronize(); // r = r - Ap hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, -1., Ap, 1., r); hipDeviceSynchronize(); // p = r; //for (int i = 0; i < m_n; i++) {p[i] = r[i];}; hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, 1., r, 0., p); hipDeviceSynchronize(); // rsold = r' * r; auto rsold = cblas_ddot(m_n, r, 1, p, 1); // for i = 1:length(b) int k = 0; for (; k < m_n; ++k) { // Ap = A * p; if (width == m_n) { hipLaunchKernelGGL(( dgemv), dim3(grid_size), dim3(block_size), 0, 0, m_m, m_n, 1., m_A, p, Ap); } else { hipLaunchKernelGGL(( dgemv2), dim3(grid), dim3(block_size), sm, 0, m_m, m_n, 1., m_A, p, Ap, width); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce), dim3(grid_size), dim3(block_size), 0, 0, m_n, n_reduce, Ap); } hipDeviceSynchronize(); // alpha = rsold / 
(p' * Ap); auto alpha = rsold / ::max(cblas_ddot(m_n, p, 1, Ap, 1), rsold * NEARZERO); // x = x + alpha * p; hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, alpha, p , 1., x); // r = r - alpha * Ap; hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, -alpha, Ap, 1., r); hipDeviceSynchronize(); // rsnew = r' * r; auto rsnew = cblas_ddot(m_n, r, 1, r, 1); // if sqrt(rsnew) < 1e-10 // break; if (std::sqrt(rsnew) < m_tolerance) break; // Convergence test auto beta = rsnew / rsold; // p = r + (rsnew / rsold) * p; hipLaunchKernelGGL(( daxpy), dim3(grid_size), dim3(block_size), 0, 0, m_n, 1., r, beta, p); hipDeviceSynchronize(); // rsold = rsnew; rsold = rsnew; if (DEBUG) { std::cout << "\t[STEP " << k << "] residual = " << std::scientific << std::sqrt(rsold) << "\r" << std::flush; } } if (DEBUG) { std::fill_n(&r[0], m_n, 0.); cblas_dgemv(CblasRowMajor, CblasNoTrans, m_m, m_n, 1., m_A.data(), m_n, x, 1, 0., r, 1); cblas_daxpy(m_n, -1., m_b, 1, r, 1); auto res = std::sqrt(cblas_ddot(m_n, r, 1, r, 1)) / std::sqrt(cblas_ddot(m_n, m_b, 1, m_b, 1)); auto nx = std::sqrt(cblas_ddot(m_n, x, 1, x, 1)); std::cout << "\t[STEP " << k << "] residual = " << std::scientific << std::sqrt(rsold) << ", ||x|| = " << nx << ", ||Ax - b||/||b|| = " << res << std::endl; } hipFree(&r); hipFree(&p); hipFree(&Ap); hipFree(&tmp); } void CGSolver::read_matrix(const std::string & filename) { m_A.read(filename); m_m = m_A.m(); m_n = m_A.n(); } /* Initialization of the source term b */ void Solver::init_source_term(double h) { //m_b.resize(m_n); hipMallocManaged(&m_b, m_n*sizeof(double)); for (int i = 0; i < m_n; i++) { m_b[i] = -2. * i * M_PI * M_PI * std::sin(10. * M_PI * i * h) * std::sin(10. * M_PI * i * h); } }
6078b9adf9a875d58371840e34585bd53b9027e9.cu
#include "cg.hh" #include "matrix.hh" #include <algorithm> #include <cblas.h> #include <cmath> #include <iostream> #include <cuda_runtime.h> #define SHARED 4096 const double NEARZERO = 1.0e-14; const bool DEBUG = false; /* cgsolver solves the linear equation A*x = b where A is of size m x n Code based on MATLAB code (from wikipedia ;-) ): function x = conjgrad(A, b, x) r = b - A * x; p = r; rsold = r' * r; for i = 1:length(b) Ap = A * p; alpha = rsold / (p' * Ap); x = x + alpha * p; r = r - alpha * Ap; rsnew = r' * r; if sqrt(rsnew) < 1e-10 break; end p = r + (rsnew / rsold) * p; rsold = rsnew; end end */ ////////////////////////////////////////////////////////////////////////////////////////////// /* I implemented 4 kernels: -dgemv: makes matrix multiplication y = alpha*A*x with 1 thread per cross-product, puts SHARED values of x in shared memory -dgemv2: makes matrix multiplication y = alpha*A*x with n_reduce threads per cross-product, each value is stores in y before reduction with reduce kernel puts width values of x in shared memory -reduce: reduces the value of y to finish the computations of dgemv2 -daxpy: performs y = alpha*x + beta*y */ ////////////////////////////////////////////////////////////////////////////////////////////// __global__ void dgemv(int m, int n, double alpha, Matrix A, double * x, double * y) { /// Computes y = alpha*A*x int row = blockDim.x * blockIdx.x + threadIdx.x; // Instantiate an array in shared memory too store x __shared__ double x_shar[SHARED+1023]; // +1023 to prevent bank conflicts (I think that's how it works) // Each thread in a block will put some data of x into shared memory // First look at columns to be put in x_shar by this thread int chunk; if (blockDim.x > SHARED) {chunk = 1;} else {chunk = SHARED/blockDim.x + (SHARED % blockDim.x == 0 ? 
0 : 1);}; int first_col = chunk*threadIdx.x; int last_col = first_col + chunk; if (last_col > SHARED) {last_col=SHARED;}; // Put values of x in shared memory for (int col = first_col; col < last_col; col++) { x_shar[col] = x[col]; } __syncthreads(); // Make sure it is done for all threads*/ // stores partial cross-products in y, taking x in global memory only if needed double sum = 0; if (row < n) { for (int j = 0; j < m; j++) { if (j < SHARED) { sum += alpha* A(j,row) * x_shar[j]; } else { sum += alpha* A(j,row) * x[j]; // A is spd, we access it col-wise for coalesced memory } } y[row] = sum; } } __global__ void dgemv2(int m, int n, double alpha, Matrix A, double * x, double * y, int width) { /// Computes truncated cross-products alpha * A(i,j) * x(i), the final summation is done elsewhere // Get the indices of x to make cross_product int row = blockDim.x * blockIdx.x + threadIdx.x; int start_col = blockIdx.y * width; int end_col = start_col + width; if (end_col > n) {end_col = n;}; // Instantiate an array in shared memory too store x extern __shared__ double x_shar[]; // Each thread in a block will put some data of x into shared memory // First look at columns to be put in x_shar by this thread int chunk; if (blockDim.x > width) {chunk = 1;} else {chunk = width/blockDim.x + (width%blockDim.x==0 ? 
0 : 1);}; int first_col = chunk*threadIdx.x + start_col; int last_col = first_col + chunk; if (last_col > end_col) {last_col=end_col;}; // Put values of x in shared memory for (int col = first_col; col < last_col; col++) { x_shar[col-start_col] = x[col]; } __syncthreads(); // Make sure it is done for all threads // Compute the partial cross-products and store them in y if (row < m) { double sum = 0; for (int col = start_col; col < end_col; col++) { sum += alpha*A(col,row)*x_shar[col-start_col]; // A is symmetric } y[blockIdx.y*n + row] = sum; } } __global__ void reduce(int n, int n_reduce, double * y) { /// Makes the summation to achieve y = alpha*A*x int row = blockDim.x * blockIdx.x + threadIdx.x; // if row sums up the partial cross-products in a row, and save it in the first row of y if (row < n) { double sum = 0; for (int j = 0; j < n_reduce; j++) { sum += y[j*n + row]; } y[row] = sum; } } __global__ void daxpy(int n, double alpha, double * x, double beta, double * y) { // Computes y = alpha * x + beta * y int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) { double tmp = beta * y[i]; y[i] = alpha * x[i] + tmp; } } void CGSolver::solve(double * x, int block_size, int width) { // Compute the grid sizes int grid_size = m_m / block_size + (m_m % block_size == 0 ? 0 : 1); //grid_size along rows int n_reduce = m_n / width + (m_n % width == 0 ? 
0 : 1); // grid_size along y (# of partial cross-products) dim3 grid = dim3(grid_size, n_reduce); size_t sm = (width+block_size-1)*sizeof(double); //size of shared memory // (+block_size-1 to prevent, I think it works) // Allocate memory in host and device double * r; double * p; double * Ap; double * tmp; cudaMallocManaged(&r, m_n*sizeof(double)); cudaMallocManaged(&p, m_n*sizeof(double)); // If using dgemv Ap only needs m_n floating points if (width == m_n){ cudaMallocManaged(&Ap, m_n*sizeof(double)); } else { cudaMallocManaged(&Ap, m_n*n_reduce*sizeof(double)); } cudaMallocManaged(&tmp, m_n*sizeof(double)); // r = b - A * x; if (width == m_n) { dgemv<<<grid_size, block_size>>>(m_m, m_n, 1., m_A, x, Ap); } else { dgemv2<<<grid, block_size, sm>>>(m_m, m_n, 1., m_A, x, Ap, width); cudaDeviceSynchronize(); reduce<<<grid_size, block_size>>>(m_n, n_reduce, Ap); } cudaDeviceSynchronize(); //r = m_b; //for (int i = 0; i < m_n; i++) {r[i] = m_b[i];}; daxpy<<<grid_size, block_size>>>(m_n, 1., m_b, 0., r); cudaDeviceSynchronize(); // r = r - Ap daxpy<<<grid_size, block_size>>>(m_n, -1., Ap, 1., r); cudaDeviceSynchronize(); // p = r; //for (int i = 0; i < m_n; i++) {p[i] = r[i];}; daxpy<<<grid_size, block_size>>>(m_n, 1., r, 0., p); cudaDeviceSynchronize(); // rsold = r' * r; auto rsold = cblas_ddot(m_n, r, 1, p, 1); // for i = 1:length(b) int k = 0; for (; k < m_n; ++k) { // Ap = A * p; if (width == m_n) { dgemv<<<grid_size, block_size>>>(m_m, m_n, 1., m_A, p, Ap); } else { dgemv2<<<grid, block_size, sm>>>(m_m, m_n, 1., m_A, p, Ap, width); cudaDeviceSynchronize(); reduce<<<grid_size, block_size>>>(m_n, n_reduce, Ap); } cudaDeviceSynchronize(); // alpha = rsold / (p' * Ap); auto alpha = rsold / std::max(cblas_ddot(m_n, p, 1, Ap, 1), rsold * NEARZERO); // x = x + alpha * p; daxpy<<<grid_size, block_size>>>(m_n, alpha, p , 1., x); // r = r - alpha * Ap; daxpy<<<grid_size, block_size>>>(m_n, -alpha, Ap, 1., r); cudaDeviceSynchronize(); // rsnew = r' * r; auto rsnew = 
cblas_ddot(m_n, r, 1, r, 1); // if sqrt(rsnew) < 1e-10 // break; if (std::sqrt(rsnew) < m_tolerance) break; // Convergence test auto beta = rsnew / rsold; // p = r + (rsnew / rsold) * p; daxpy<<<grid_size, block_size>>>(m_n, 1., r, beta, p); cudaDeviceSynchronize(); // rsold = rsnew; rsold = rsnew; if (DEBUG) { std::cout << "\t[STEP " << k << "] residual = " << std::scientific << std::sqrt(rsold) << "\r" << std::flush; } } if (DEBUG) { std::fill_n(&r[0], m_n, 0.); cblas_dgemv(CblasRowMajor, CblasNoTrans, m_m, m_n, 1., m_A.data(), m_n, x, 1, 0., r, 1); cblas_daxpy(m_n, -1., m_b, 1, r, 1); auto res = std::sqrt(cblas_ddot(m_n, r, 1, r, 1)) / std::sqrt(cblas_ddot(m_n, m_b, 1, m_b, 1)); auto nx = std::sqrt(cblas_ddot(m_n, x, 1, x, 1)); std::cout << "\t[STEP " << k << "] residual = " << std::scientific << std::sqrt(rsold) << ", ||x|| = " << nx << ", ||Ax - b||/||b|| = " << res << std::endl; } cudaFree(&r); cudaFree(&p); cudaFree(&Ap); cudaFree(&tmp); } void CGSolver::read_matrix(const std::string & filename) { m_A.read(filename); m_m = m_A.m(); m_n = m_A.n(); } /* Initialization of the source term b */ void Solver::init_source_term(double h) { //m_b.resize(m_n); cudaMallocManaged(&m_b, m_n*sizeof(double)); for (int i = 0; i < m_n; i++) { m_b[i] = -2. * i * M_PI * M_PI * std::sin(10. * M_PI * i * h) * std::sin(10. * M_PI * i * h); } }
8bdcfe035e9398350f7f053f51458042cc00c51d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <rocblas.h> __global__ void kernel_rbf( const double gamma , const double* __restrict__ squares , const double* __restrict__ matrix_base , const int* __restrict__ RowPtrA , const int* __restrict__ ColIndA , const int* __restrict__ indexPermutation , float* __restrict__ result , const int len , const unsigned i_off , const int j_new ) { const int result_i = blockDim.x * blockIdx.x + threadIdx.x; if (result_i < len) { double res = 0; const int i = indexPermutation[i_off + result_i]; const int j = indexPermutation[j_new]; const double* pi = matrix_base + RowPtrA[i]; const int* icols = &ColIndA[RowPtrA[i]]; const int ilen = RowPtrA[i + 1] - RowPtrA[i]; const double* pj = matrix_base + RowPtrA[j]; const int* jcols = &ColIndA[RowPtrA[j]]; const int jlen = RowPtrA[j + 1] - RowPtrA[j]; for(int ipos = 0, jpos = 0; ipos < ilen && jpos < jlen; ) { if (icols[ipos] < jcols[jpos]) { ++ipos; } else if (icols[ipos] > jcols[jpos]) { ++jpos; } else { res += pi[ipos] * pj[jpos]; ++ipos; ++jpos; } } result[result_i] = (float)exp(-gamma*(squares[i] + squares[j] - 2*res)); } } // Note, that matrix indexees are zero-based, but x indexes are 1-based __global__ void kernel_rbf_predict( const double gamma , const double* __restrict__ matrix_base , const int* __restrict__ RowPtrA , const int* __restrict__ ColIndA , double* __restrict__ result_vector , const int rows , const int x_len , const int* __restrict__ x_cols , const double* __restrict__ x_val , const double* __restrict__ sv_coef ) { const int result_i = blockDim.x * blockIdx.x + threadIdx.x; if (result_i < rows) { //const bool debug = (result_i == 0) ? 
true : false; double res = 0; const double* pi = matrix_base + RowPtrA[result_i]; const int* icols = &ColIndA[RowPtrA[result_i]]; const int ilen = RowPtrA[result_i + 1] - RowPtrA[result_i]; int ipos = 0; int xpos = 0; //if (debug) { // printf("ilen=%d x_len=%d :\n", ilen, x_len); //} while(ipos < ilen && xpos < x_len) { if (icols[ipos] + 1 < x_cols[xpos]) { //if (debug) { // printf("i[%d]**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos]); //} res += pi[ipos] * pi[ipos]; ++ipos; } else if (icols[ipos] + 1 > x_cols[xpos]) { //if (debug) { // printf("x[%d]**2=%f**2 ", x_cols[xpos], (double)x_val[xpos]); //} res += x_val[xpos] * x_val[xpos]; ++xpos; } else { const double d = pi[ipos] - x_val[xpos]; //if (debug) { // printf("(i[%d]=%f - x[%d]=%f)**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos], x_cols[xpos], (double)x_val[xpos], d); //} res += d*d; ++ipos; ++xpos; } } while(ipos < ilen) { //if (debug) { // printf("i[%d]**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos]); //} res += pi[ipos] * pi[ipos]; ++ipos; } while(xpos < x_len) { //if (debug) { // printf("x[%d]**2=%f**2 ", x_cols[xpos], (double)x_val[xpos]); //} res += x_val[xpos] * x_val[xpos]; ++xpos; } result_vector[result_i] = (sv_coef ? 
sv_coef[result_i] : 1) * exp(-gamma * res); //if (debug) { // printf("\n"); //} } } int calculate_vector_rbf( const double gamma , const double* squares , const double* matrix_base , const int* RowPtrA , const int* ColIndA , const int* indexPermutation , float* result , const int len , const int j , const int from ) { const int threadsPerBlock = 64; const int blocksPerGrid =(len + threadsPerBlock - 1) / threadsPerBlock; //std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; hipLaunchKernelGGL(( kernel_rbf), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, gamma , squares , matrix_base , RowPtrA , ColIndA , indexPermutation , result , len , from , j ); const hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cerr << "Failed to launch kernel_rbf kernel (error code " << hipGetErrorString(err) << " )!" << std::endl; exit(EXIT_FAILURE); } return 0; } void calculate_vector_rbf_predict( const double gamma , const double* matrix_base , const int* RowPtrA , const int* ColIndA , double* result_vector /* to store values */ , const int rows // number of rows in matrix (and output length of result_vector) , const int x_len , const int* x_cols , const double* x_val , const double* sv_coef // size() = rows , hipblasHandle_t cublas_handle , hipStream_t custream ) { const int threadsPerBlock = 64; const int blocksPerGrid =(rows + threadsPerBlock - 1) / threadsPerBlock; //std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; hipLaunchKernelGGL(( kernel_rbf_predict), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, custream, gamma , matrix_base , RowPtrA , ColIndA , result_vector , rows , x_len , x_cols , x_val , sv_coef ); const hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cerr << "Failed to launch kernel_rbf_predict (error code " << hipGetErrorString(err) << " )!" << std::endl; exit(EXIT_FAILURE); } }
8bdcfe035e9398350f7f053f51458042cc00c51d.cu
#include <iostream> #include <cuda_runtime.h> #include <cublas_v2.h> __global__ void kernel_rbf( const double gamma , const double* __restrict__ squares , const double* __restrict__ matrix_base , const int* __restrict__ RowPtrA , const int* __restrict__ ColIndA , const int* __restrict__ indexPermutation , float* __restrict__ result , const int len , const unsigned i_off , const int j_new ) { const int result_i = blockDim.x * blockIdx.x + threadIdx.x; if (result_i < len) { double res = 0; const int i = indexPermutation[i_off + result_i]; const int j = indexPermutation[j_new]; const double* pi = matrix_base + RowPtrA[i]; const int* icols = &ColIndA[RowPtrA[i]]; const int ilen = RowPtrA[i + 1] - RowPtrA[i]; const double* pj = matrix_base + RowPtrA[j]; const int* jcols = &ColIndA[RowPtrA[j]]; const int jlen = RowPtrA[j + 1] - RowPtrA[j]; for(int ipos = 0, jpos = 0; ipos < ilen && jpos < jlen; ) { if (icols[ipos] < jcols[jpos]) { ++ipos; } else if (icols[ipos] > jcols[jpos]) { ++jpos; } else { res += pi[ipos] * pj[jpos]; ++ipos; ++jpos; } } result[result_i] = (float)exp(-gamma*(squares[i] + squares[j] - 2*res)); } } // Note, that matrix indexees are zero-based, but x indexes are 1-based __global__ void kernel_rbf_predict( const double gamma , const double* __restrict__ matrix_base , const int* __restrict__ RowPtrA , const int* __restrict__ ColIndA , double* __restrict__ result_vector , const int rows , const int x_len , const int* __restrict__ x_cols , const double* __restrict__ x_val , const double* __restrict__ sv_coef ) { const int result_i = blockDim.x * blockIdx.x + threadIdx.x; if (result_i < rows) { //const bool debug = (result_i == 0) ? 
true : false; double res = 0; const double* pi = matrix_base + RowPtrA[result_i]; const int* icols = &ColIndA[RowPtrA[result_i]]; const int ilen = RowPtrA[result_i + 1] - RowPtrA[result_i]; int ipos = 0; int xpos = 0; //if (debug) { // printf("ilen=%d x_len=%d :\n", ilen, x_len); //} while(ipos < ilen && xpos < x_len) { if (icols[ipos] + 1 < x_cols[xpos]) { //if (debug) { // printf("i[%d]**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos]); //} res += pi[ipos] * pi[ipos]; ++ipos; } else if (icols[ipos] + 1 > x_cols[xpos]) { //if (debug) { // printf("x[%d]**2=%f**2 ", x_cols[xpos], (double)x_val[xpos]); //} res += x_val[xpos] * x_val[xpos]; ++xpos; } else { const double d = pi[ipos] - x_val[xpos]; //if (debug) { // printf("(i[%d]=%f - x[%d]=%f)**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos], x_cols[xpos], (double)x_val[xpos], d); //} res += d*d; ++ipos; ++xpos; } } while(ipos < ilen) { //if (debug) { // printf("i[%d]**2=%f**2 ", icols[ipos] + 1, (double)pi[ipos]); //} res += pi[ipos] * pi[ipos]; ++ipos; } while(xpos < x_len) { //if (debug) { // printf("x[%d]**2=%f**2 ", x_cols[xpos], (double)x_val[xpos]); //} res += x_val[xpos] * x_val[xpos]; ++xpos; } result_vector[result_i] = (sv_coef ? 
sv_coef[result_i] : 1) * exp(-gamma * res); //if (debug) { // printf("\n"); //} } } int calculate_vector_rbf( const double gamma , const double* squares , const double* matrix_base , const int* RowPtrA , const int* ColIndA , const int* indexPermutation , float* result , const int len , const int j , const int from ) { const int threadsPerBlock = 64; const int blocksPerGrid =(len + threadsPerBlock - 1) / threadsPerBlock; //std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; kernel_rbf<<<blocksPerGrid, threadsPerBlock>>>( gamma , squares , matrix_base , RowPtrA , ColIndA , indexPermutation , result , len , from , j ); const cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { std::cerr << "Failed to launch kernel_rbf kernel (error code " << cudaGetErrorString(err) << " )!" << std::endl; exit(EXIT_FAILURE); } return 0; } void calculate_vector_rbf_predict( const double gamma , const double* matrix_base , const int* RowPtrA , const int* ColIndA , double* result_vector /* to store values */ , const int rows // number of rows in matrix (and output length of result_vector) , const int x_len , const int* x_cols , const double* x_val , const double* sv_coef // size() = rows , cublasHandle_t cublas_handle , cudaStream_t custream ) { const int threadsPerBlock = 64; const int blocksPerGrid =(rows + threadsPerBlock - 1) / threadsPerBlock; //std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; kernel_rbf_predict<<<blocksPerGrid, threadsPerBlock, 0, custream>>>( gamma , matrix_base , RowPtrA , ColIndA , result_vector , rows , x_len , x_cols , x_val , sv_coef ); const cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { std::cerr << "Failed to launch kernel_rbf_predict (error code " << cudaGetErrorString(err) << " )!" << std::endl; exit(EXIT_FAILURE); } }
ae1bd1a48a900719fe1010ba9fad95202428d002.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "AvoidObstaclesCUDA.cuh" using namespace OpenSteer; // Kernel function prototype. extern "C" { __host__ void SteerToAvoidObstaclesKernelBindTextures( float4 const* pdObstaclePosition, float const* pdObstacleRadius, uint const numObstacles ); __host__ void SteerToAvoidObstaclesKernelUnbindTextures( void ); __global__ void SteerToAvoidObstaclesKernel( uint const* pdKNNIndices, // In: Indices of the K Nearest Obstacles. float const* pdKNNDistances, // In: Distances to the K Nearest Obstacles. size_t const k, float4 const* pdPosition, // In: Agent positions. float4 const* pdDirection, // In: Agent directions. float3 const* pdSide, float3 const* pdUp, float const* pdRadius, // In: Agent radii. float const* pdSpeed, // In: Agent speeds. float const minTimeToCollision, float4 * pdSteering, // Out: Agent steering vectors. uint const numAgents, // In: Number of agents. uint const numObstacles, // In: Number of obstacles. float const fWeight, // In: Weight for this kernel uint * pdAppliedKernels, uint const doNotApplyWith ); } AvoidObstaclesCUDA::AvoidObstaclesCUDA( AgentGroup * pAgentGroup, ObstacleGroup * pObstacleGroup, KNNData * pKNNData, float const fMinTimeToCollision, float const fWeight, uint const doNotApplyWith ) : AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ), m_pObstacleGroup( pObstacleGroup ), m_fMinTimeToCollision( fMinTimeToCollision ), m_pKNNData( pKNNData ) { // Nothing to do. 
} void AvoidObstaclesCUDA::init(void) { } void AvoidObstaclesCUDA::run(void) { dim3 grid = gridDim(); dim3 block = blockDim(); uint const* pdKNNIndices = m_pKNNData->pdKNNIndices(); float const* pdKNNDistances = m_pKNNData->pdKNNDistances(); uint const& k = m_pKNNData->k(); float4 const* pdPosition = m_pAgentGroupData->pdPosition(); float4 const* pdDirection = m_pAgentGroupData->pdDirection(); float3 const* pdSide = m_pAgentGroupData->pdSide(); float3 const* pdUp = m_pAgentGroupData->pdUp(); float const* pdRadius = m_pAgentGroupData->pdRadius(); float const* pdSpeed = m_pAgentGroupData->pdSpeed(); float4 const* pdObstaclePosition = m_pObstacleGroup->pdPosition(); float const* pdObstacleRadius = m_pObstacleGroup->pdRadius(); float4 * pdSteering = m_pAgentGroupData->pdSteering(); uint const& numAgents = m_pAgentGroup->Size(); uint const& numObstacles = m_pObstacleGroup->Size(); uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels(); size_t shMemSize = k * THREADSPERBLOCK * (sizeof(uint) + sizeof(float)); // Bind the textures. SteerToAvoidObstaclesKernelBindTextures( pdObstaclePosition, pdObstacleRadius, numObstacles ); hipLaunchKernelGGL(( SteerToAvoidObstaclesKernel), dim3(grid), dim3(block), shMemSize , 0, pdKNNIndices, pdKNNDistances, k, pdPosition, pdDirection, pdSide, pdUp, pdRadius, pdSpeed, m_fMinTimeToCollision, pdSteering, numAgents, numObstacles, m_fWeight, pdAppliedKernels, m_doNotApplyWith ); cutilCheckMsg( "AvoidObstaclesCUDAKernel failed." ); //CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Unbind the textures. SteerToAvoidObstaclesKernelUnbindTextures(); } void AvoidObstaclesCUDA::close(void) { // Device data has changed. Instruct the AgentGroup it needs to synchronize the host. m_pAgentGroup->SetSyncHost(); }
ae1bd1a48a900719fe1010ba9fad95202428d002.cu
#include "AvoidObstaclesCUDA.cuh" using namespace OpenSteer; // Kernel function prototype. extern "C" { __host__ void SteerToAvoidObstaclesKernelBindTextures( float4 const* pdObstaclePosition, float const* pdObstacleRadius, uint const numObstacles ); __host__ void SteerToAvoidObstaclesKernelUnbindTextures( void ); __global__ void SteerToAvoidObstaclesKernel( uint const* pdKNNIndices, // In: Indices of the K Nearest Obstacles. float const* pdKNNDistances, // In: Distances to the K Nearest Obstacles. size_t const k, float4 const* pdPosition, // In: Agent positions. float4 const* pdDirection, // In: Agent directions. float3 const* pdSide, float3 const* pdUp, float const* pdRadius, // In: Agent radii. float const* pdSpeed, // In: Agent speeds. float const minTimeToCollision, float4 * pdSteering, // Out: Agent steering vectors. uint const numAgents, // In: Number of agents. uint const numObstacles, // In: Number of obstacles. float const fWeight, // In: Weight for this kernel uint * pdAppliedKernels, uint const doNotApplyWith ); } AvoidObstaclesCUDA::AvoidObstaclesCUDA( AgentGroup * pAgentGroup, ObstacleGroup * pObstacleGroup, KNNData * pKNNData, float const fMinTimeToCollision, float const fWeight, uint const doNotApplyWith ) : AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ), m_pObstacleGroup( pObstacleGroup ), m_fMinTimeToCollision( fMinTimeToCollision ), m_pKNNData( pKNNData ) { // Nothing to do. 
} void AvoidObstaclesCUDA::init(void) { } void AvoidObstaclesCUDA::run(void) { dim3 grid = gridDim(); dim3 block = blockDim(); uint const* pdKNNIndices = m_pKNNData->pdKNNIndices(); float const* pdKNNDistances = m_pKNNData->pdKNNDistances(); uint const& k = m_pKNNData->k(); float4 const* pdPosition = m_pAgentGroupData->pdPosition(); float4 const* pdDirection = m_pAgentGroupData->pdDirection(); float3 const* pdSide = m_pAgentGroupData->pdSide(); float3 const* pdUp = m_pAgentGroupData->pdUp(); float const* pdRadius = m_pAgentGroupData->pdRadius(); float const* pdSpeed = m_pAgentGroupData->pdSpeed(); float4 const* pdObstaclePosition = m_pObstacleGroup->pdPosition(); float const* pdObstacleRadius = m_pObstacleGroup->pdRadius(); float4 * pdSteering = m_pAgentGroupData->pdSteering(); uint const& numAgents = m_pAgentGroup->Size(); uint const& numObstacles = m_pObstacleGroup->Size(); uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels(); size_t shMemSize = k * THREADSPERBLOCK * (sizeof(uint) + sizeof(float)); // Bind the textures. SteerToAvoidObstaclesKernelBindTextures( pdObstaclePosition, pdObstacleRadius, numObstacles ); SteerToAvoidObstaclesKernel<<< grid, block, shMemSize >>>( pdKNNIndices, pdKNNDistances, k, pdPosition, pdDirection, pdSide, pdUp, pdRadius, pdSpeed, m_fMinTimeToCollision, pdSteering, numAgents, numObstacles, m_fWeight, pdAppliedKernels, m_doNotApplyWith ); cutilCheckMsg( "AvoidObstaclesCUDAKernel failed." ); //CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Unbind the textures. SteerToAvoidObstaclesKernelUnbindTextures(); } void AvoidObstaclesCUDA::close(void) { // Device data has changed. Instruct the AgentGroup it needs to synchronize the host. m_pAgentGroup->SetSyncHost(); }
7bbb56fd643fc0af1b5e8b53a0f362276c010ac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cuda_auxiliary.h" #define BLOCK_SIZE 64 __global__ void gpu_dgemv(double *A, double *x, double *y, const int dim) { __shared__ double cache[BLOCK_SIZE]; int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double sum = 0; for (int i = 0; i < ((dim + BLOCK_SIZE - 1) / BLOCK_SIZE); ++i){ if(i * BLOCK_SIZE + tid < dim) cache[tid] = x[threadIdx.x + i * BLOCK_SIZE]; else cache[tid] = 0.f; __syncthreads(); for (int j = 0; j < BLOCK_SIZE; ++j){ sum += A[gid * dim + (i * BLOCK_SIZE + j)] * cache[j]; } __syncthreads(); } if(gid < dim) y[gid] = sum; } __global__ void gpu_dnrm2(double *x, double *nrm, const int dim, bool invert) { __shared__ double cache[BLOCK_SIZE]; int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid < dim) cache[tid] = x[gid]; else cache[tid] = 0; __syncthreads(); cache[tid] = cache[tid] * cache[tid]; __syncthreads(); for(int i = blockDim.x / 2; i > 0; i >>= 1){ if(tid < i) cache[tid] = cache[tid] + cache[tid + i]; __syncthreads(); } if (tid == 0) { if (invert) nrm[0] = 1.0/sqrt(cache[0]); else nrm[0] = sqrt(cache[0]); } } __global__ void gpu_dscal(double *x, double *y, double *alpha, const int dim) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < dim) y[gid] = x[gid] * alpha[0]; } __global__ void gpu_subtract(double *x, double *y, double *out, const int dim) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < dim) out[gid] = x[gid] - y[gid]; } __global__ void gpu_ddot(double *x, double *y, double *out, const int dim) { __shared__ double cache[BLOCK_SIZE]; int cacheindex = threadIdx.x; double temp; for(int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < dim; gid += blockDim.x * gridDim.x) temp += x[gid] * y[gid]; cache[cacheindex] = temp; __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (cacheindex < i) cache[cacheindex] += cache[cacheindex + i]; 
__syncthreads(); } if (threadIdx.x == 0) out[0] = cache[0]; } int main(int argc, char **argv) { FILE *fp_A = NULL; FILE *fp_x = NULL; double *hst_A = NULL; double *hst_x = NULL; double *dev_A = NULL; double *dev_x = NULL; double *dev_y = NULL; double *dev_nrm_inv = NULL; double *dev_lambda; double eigval; double lambda; double subsnorm; double EPS = 0.00001; bool converged = false; dim3 block_size; dim3 grid_size; int dim; double dgemv_timer = 0.0; if (argc != 4) { fprintf(stderr, "usage: %s N A.dat x0.dat\n", argv[0]); exit(EXIT_FAILURE); } dim = atoi(argv[1]); open_file(fp_A, argv[2], "r"); host_alloc(hst_A, double, dim * dim); open_file(fp_x, argv[3], "r"); host_alloc(hst_x, double, dim); read_file(hst_A, sizeof(double), dim * dim, fp_A); read_file(hst_x, sizeof(double), dim, fp_x); cuda_exec(hipMalloc(&dev_A, dim * dim * sizeof(double))); cuda_exec(hipMalloc(&dev_x, dim * sizeof(double))); cuda_exec(hipMalloc(&dev_y, dim * sizeof(double))); cuda_exec(hipMalloc(&dev_nrm_inv, sizeof(double))); cuda_exec(hipMalloc(&dev_lambda, sizeof(double))); cuda_exec(hipMemcpy(dev_A, hst_A, dim * dim * sizeof(double), hipMemcpyHostToDevice)); cuda_exec(hipMemcpy(dev_x, hst_x, dim * sizeof(double), hipMemcpyHostToDevice)); cuda_exec(hipMemcpy(dev_y, hst_x, dim * sizeof(double), hipMemcpyHostToDevice)); block_size.x = BLOCK_SIZE; grid_size.x = min((dim + block_size.x - 1) / block_size.x, 65535); int cnt = 0; while(!converged){ hipLaunchKernelGGL(( gpu_dnrm2), dim3(grid_size), dim3(block_size), 0, 0, dev_y, dev_nrm_inv, dim, true); hipLaunchKernelGGL(( gpu_dscal), dim3(grid_size), dim3(block_size), 0, 0, dev_y, dev_x, dev_nrm_inv, dim); if(cnt == 1) dgemv_timer -= timer(); hipLaunchKernelGGL(( gpu_dgemv), dim3(grid_size), dim3(block_size), 0, 0, dev_A, dev_x, dev_y, dim); if(cnt == 1) dgemv_timer += timer(); hipLaunchKernelGGL(( gpu_ddot), dim3(grid_size), dim3(block_size), 0, 0, dev_x, dev_y, dev_lambda, dim); hipLaunchKernelGGL(( gpu_dscal), dim3(grid_size), dim3(block_size), 
0, 0, dev_x, dev_x, dev_lambda, dim); hipLaunchKernelGGL(( gpu_subtract), dim3(grid_size), dim3(block_size), 0, 0, dev_y, dev_x, dev_x, dim); hipLaunchKernelGGL(( gpu_dnrm2), dim3(grid_size), dim3(block_size), 0, 0, dev_x, dev_nrm_inv, dim, false); cuda_exec(hipMemcpy(&lambda, dev_lambda, sizeof(double), hipMemcpyDeviceToHost)); cuda_exec(hipMemcpy(&subsnorm, dev_nrm_inv, sizeof(double), hipMemcpyDeviceToHost)); if (subsnorm < EPS * abs(lambda)) converged = true; if (cnt == 100000){ printf("died after %d iterations: %#.16lg > %#.16lg", cnt, subsnorm, EPS * lambda); break; } cnt++; } cuda_exec(hipMemcpy(&eigval, dev_lambda, sizeof(double), hipMemcpyDeviceToHost)); printf("Dgemv took: %.3lgms\n", 1000* dgemv_timer); printf("Spectrum: %#.16lg, done after %d iterations\n", eigval, cnt); hipFree(dev_A); hipFree(dev_x); hipFree(dev_y); hipFree(dev_nrm_inv); hipFree(dev_lambda); host_free(hst_A); host_free(hst_x); return 0; }
7bbb56fd643fc0af1b5e8b53a0f362276c010ac7.cu
#include <stdio.h> #include "cuda_auxiliary.h" #define BLOCK_SIZE 64 __global__ void gpu_dgemv(double *A, double *x, double *y, const int dim) { __shared__ double cache[BLOCK_SIZE]; int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double sum = 0; for (int i = 0; i < ((dim + BLOCK_SIZE - 1) / BLOCK_SIZE); ++i){ if(i * BLOCK_SIZE + tid < dim) cache[tid] = x[threadIdx.x + i * BLOCK_SIZE]; else cache[tid] = 0.f; __syncthreads(); for (int j = 0; j < BLOCK_SIZE; ++j){ sum += A[gid * dim + (i * BLOCK_SIZE + j)] * cache[j]; } __syncthreads(); } if(gid < dim) y[gid] = sum; } __global__ void gpu_dnrm2(double *x, double *nrm, const int dim, bool invert) { __shared__ double cache[BLOCK_SIZE]; int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid < dim) cache[tid] = x[gid]; else cache[tid] = 0; __syncthreads(); cache[tid] = cache[tid] * cache[tid]; __syncthreads(); for(int i = blockDim.x / 2; i > 0; i >>= 1){ if(tid < i) cache[tid] = cache[tid] + cache[tid + i]; __syncthreads(); } if (tid == 0) { if (invert) nrm[0] = 1.0/sqrt(cache[0]); else nrm[0] = sqrt(cache[0]); } } __global__ void gpu_dscal(double *x, double *y, double *alpha, const int dim) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < dim) y[gid] = x[gid] * alpha[0]; } __global__ void gpu_subtract(double *x, double *y, double *out, const int dim) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < dim) out[gid] = x[gid] - y[gid]; } __global__ void gpu_ddot(double *x, double *y, double *out, const int dim) { __shared__ double cache[BLOCK_SIZE]; int cacheindex = threadIdx.x; double temp; for(int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < dim; gid += blockDim.x * gridDim.x) temp += x[gid] * y[gid]; cache[cacheindex] = temp; __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (cacheindex < i) cache[cacheindex] += cache[cacheindex + i]; __syncthreads(); } if (threadIdx.x == 0) out[0] = cache[0]; } int main(int argc, char **argv) { 
FILE *fp_A = NULL; FILE *fp_x = NULL; double *hst_A = NULL; double *hst_x = NULL; double *dev_A = NULL; double *dev_x = NULL; double *dev_y = NULL; double *dev_nrm_inv = NULL; double *dev_lambda; double eigval; double lambda; double subsnorm; double EPS = 0.00001; bool converged = false; dim3 block_size; dim3 grid_size; int dim; double dgemv_timer = 0.0; if (argc != 4) { fprintf(stderr, "usage: %s N A.dat x0.dat\n", argv[0]); exit(EXIT_FAILURE); } dim = atoi(argv[1]); open_file(fp_A, argv[2], "r"); host_alloc(hst_A, double, dim * dim); open_file(fp_x, argv[3], "r"); host_alloc(hst_x, double, dim); read_file(hst_A, sizeof(double), dim * dim, fp_A); read_file(hst_x, sizeof(double), dim, fp_x); cuda_exec(cudaMalloc(&dev_A, dim * dim * sizeof(double))); cuda_exec(cudaMalloc(&dev_x, dim * sizeof(double))); cuda_exec(cudaMalloc(&dev_y, dim * sizeof(double))); cuda_exec(cudaMalloc(&dev_nrm_inv, sizeof(double))); cuda_exec(cudaMalloc(&dev_lambda, sizeof(double))); cuda_exec(cudaMemcpy(dev_A, hst_A, dim * dim * sizeof(double), cudaMemcpyHostToDevice)); cuda_exec(cudaMemcpy(dev_x, hst_x, dim * sizeof(double), cudaMemcpyHostToDevice)); cuda_exec(cudaMemcpy(dev_y, hst_x, dim * sizeof(double), cudaMemcpyHostToDevice)); block_size.x = BLOCK_SIZE; grid_size.x = min((dim + block_size.x - 1) / block_size.x, 65535); int cnt = 0; while(!converged){ gpu_dnrm2<<<grid_size, block_size>>>(dev_y, dev_nrm_inv, dim, true); gpu_dscal<<<grid_size, block_size>>>(dev_y, dev_x, dev_nrm_inv, dim); if(cnt == 1) dgemv_timer -= timer(); gpu_dgemv<<<grid_size, block_size>>>(dev_A, dev_x, dev_y, dim); if(cnt == 1) dgemv_timer += timer(); gpu_ddot<<<grid_size, block_size>>>(dev_x, dev_y, dev_lambda, dim); gpu_dscal<<<grid_size, block_size>>>(dev_x, dev_x, dev_lambda, dim); gpu_subtract<<<grid_size, block_size>>>(dev_y, dev_x, dev_x, dim); gpu_dnrm2<<<grid_size, block_size>>>(dev_x, dev_nrm_inv, dim, false); cuda_exec(cudaMemcpy(&lambda, dev_lambda, sizeof(double), cudaMemcpyDeviceToHost)); 
cuda_exec(cudaMemcpy(&subsnorm, dev_nrm_inv, sizeof(double), cudaMemcpyDeviceToHost)); if (subsnorm < EPS * abs(lambda)) converged = true; if (cnt == 100000){ printf("died after %d iterations: %#.16lg > %#.16lg", cnt, subsnorm, EPS * lambda); break; } cnt++; } cuda_exec(cudaMemcpy(&eigval, dev_lambda, sizeof(double), cudaMemcpyDeviceToHost)); printf("Dgemv took: %.3lgms\n", 1000* dgemv_timer); printf("Spectrum: %#.16lg, done after %d iterations\n", eigval, cnt); cudaFree(dev_A); cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_nrm_inv); cudaFree(dev_lambda); host_free(hst_A); host_free(hst_x); return 0; }
0fa0fbd90dd539ab6a503ba15c9885929950702b.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. 
#define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w2a2_decompose(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 32; const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_GLOBAL) { break; } typedef union { int4 vec; int a[4]; } U4; wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Select what warp copies what matrix to shared memory. 
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr; if (warpId < 2) { warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT; } else if (warpId < 4) { warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset] + (warpId-2) * 16 * ROW_BIT; } else if (warpId < 6) { warp_ptr = &X[block_tile_j * ROW_BIT] + (warpId -4) * 16 * ROW_BIT; } else { warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset] + (warpId-6)*16*ROW_BIT; } // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4; // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. // int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + // (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + // (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4; *shmem_ptr = *lane_ptr; shmem_ptr += 8*4*(CHUNK_K+SKEW); lane_ptr += 8*ROW_BIT*4; *shmem_ptr = *lane_ptr; // U4 tmp_probe; // tmp_probe.vec = *lane_ptr; // printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]); __syncthreads(); // if (warpId == 0 && laneId == 0 && block_tile_i == 64 && block_tile_j == 0) { // // for(int i=0; i<128; i++) { // int i = 12; // printf("Load from GL. 
i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 44; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 64; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 96; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. 
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 4 && laneId == 0 && block_tile_i == 64 && block_tile_j == 0) { // for(int i=12; i<13; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i+32, j, *((int*)&shmem[0][0]+(i+32)*64+j)); // } // } // } int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId*64 + laneId; int mask = 1; int bit0, bit1; unsigned r0, r1; int tmp0, tmp1, tmp2, tmp3; int val; #pragma unroll for(int i=0; i<4; i++) { tmp0 = *(shmem_warp_stream_ptr+i*8*64); tmp1 = *(shmem_warp_stream_ptr+32+i*8*64); tmp2 = *(shmem_warp_stream_ptr+32*64+i*8*64); tmp3 = *(shmem_warp_stream_ptr+32+32*64+i*8*64); val = tmp0 + 2*tmp1 + 2*tmp2 + 4*tmp3; // c4f07 = 0000 0000 0000 1100 0100 1111 0000 0111 // 8efa0 = 0000 0000 0000 1000 1110 1111 1010 0000 bit0 = val & (mask << 0); bit1 = (val & (mask << 1)) >> 1; r0 = __ballot_sync(0xFFFFFFFF, bit0); r1 = __ballot_sync(0xFFFFFFFF, bit1); // if (block_tile_i == 64 && block_tile_j == 0 && warpId == 4 & i == 1 && laneId == 0){ // printf("laneId: %d, tmp0: %d, tmp1: %d, tmp2: %d, tmp3: %d, val: %d, r1: %x\n", laneId, tmp0, tmp1, tmp2, tmp3, val, __brev(r1)); // } if (laneId == 0) { size_t gmem_idx = block_tile_i*N_GLOBAL/32 + block_tile_j/32 + warpId*N_GLOBAL/32 + i*8*N_GLOBAL/32; D[gmem_idx] = __brev(r0); D[gmem_idx+M_GLOBAL*N_GLOBAL/32] = __brev(r1); } } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, 
int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i*K_GLOBAL+j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; int term[4]; term[0] = 0; term[1] = 0; term[2] = 0; term[3] = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; if (xb == 0 && wb == 0) { term[0] += x_val * w_val; } else if (xb == 0 && wb == 1) { term[1] += x_val * w_val; } else if (xb == 1 && wb == 0) { term[2] += x_val * w_val; } else { term[3] += x_val * w_val; } } } } } C_ref_before_decompose[m*N_GLOBAL+n]= tmp; // if (m == 76 && n < 32) { // if (m == 76 && n == 0) { // printf("n: %d, term[0]: %d, term[1]: %d, term[2]: %d, term[3]: %d, tmp: %d\n", n, term[0], term[1], term[2], term[3], tmp); // } } } // 652d45c9 = 0110 0101 0010 1101 0100 0101 1100 1001 // for(int i=0; i<32; i++) { // printf("i=%d: %d\n", i, C_ref_before_decompose[76*N_GLOBAL+i]); // } // printf("w0: %x %x %x %x\n", W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 0], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 1], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 2], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 3]); // printf("w1: %x %x %x %x\n", W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 0], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 1], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 2], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 3]); // printf("x0: %d %d %d %d\n", X_int[wb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]) // int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; // int x_int = 
X_int[wb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*N_GLOBAL+n_tile*32+n]; tmp = (tmp - 0); // Can be modified for other quantized parameters. for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | (((mask<<b) & tmp) >> b); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile] = val[b]; } } } // printf("b=1, m=76, n_tile=0: %x\n", ref_C[1*M_GLOBAL*N_GLOBAL/32+76*N_GLOBAL/32+0]); } // 8efa0 = 0000 0000 0000 1000 1110 1111 1010 0000 // 652d45c9 = 0110 0101 0010 1101 0100 0101 1100 1001 void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %x, ref_C: %x\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int X_BIT = 2; int W_BIT = 2; for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) { // int M_GLOBAL = 128; int N_GLOBAL = M_GLOBAL; int K_GLOBAL = M_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) 
* X_BIT, hipMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( apmm_w2a2_decompose, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1000; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (apmm_w2a2_decompose), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V73, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL/32 * X_BIT); /* Copmpute reference matrix on CPU */ compute_ref_pack(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT, X_BIT); /* validation results */ validate_results_pack(Output_h, Output_ref, M_GLOBAL, N_GLOBAL, X_BIT); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(hipFree(reinterpret_cast<void *>(W))); checkCudaErrors(hipFree(reinterpret_cast<void *>(X))); checkCudaErrors(hipFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
0fa0fbd90dd539ab6a503ba15c9885929950702b.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. 
#define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w2a2_decompose(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 32; const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_GLOBAL) { break; } typedef union { int4 vec; int a[4]; } U4; wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Select what warp copies what matrix to shared memory. 
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr; if (warpId < 2) { warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT; } else if (warpId < 4) { warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset] + (warpId-2) * 16 * ROW_BIT; } else if (warpId < 6) { warp_ptr = &X[block_tile_j * ROW_BIT] + (warpId -4) * 16 * ROW_BIT; } else { warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset] + (warpId-6)*16*ROW_BIT; } // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4; // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. // int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + // (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + // (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4; *shmem_ptr = *lane_ptr; shmem_ptr += 8*4*(CHUNK_K+SKEW); lane_ptr += 8*ROW_BIT*4; *shmem_ptr = *lane_ptr; // U4 tmp_probe; // tmp_probe.vec = *lane_ptr; // printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]); __syncthreads(); // if (warpId == 0 && laneId == 0 && block_tile_i == 64 && block_tile_j == 0) { // // for(int i=0; i<128; i++) { // int i = 12; // printf("Load from GL. 
i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 44; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 64; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // i = 96; // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. 
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 4 && laneId == 0 && block_tile_i == 64 && block_tile_j == 0) { // for(int i=12; i<13; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i+32, j, *((int*)&shmem[0][0]+(i+32)*64+j)); // } // } // } int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId*64 + laneId; int mask = 1; int bit0, bit1; unsigned r0, r1; int tmp0, tmp1, tmp2, tmp3; int val; #pragma unroll for(int i=0; i<4; i++) { tmp0 = *(shmem_warp_stream_ptr+i*8*64); tmp1 = *(shmem_warp_stream_ptr+32+i*8*64); tmp2 = *(shmem_warp_stream_ptr+32*64+i*8*64); tmp3 = *(shmem_warp_stream_ptr+32+32*64+i*8*64); val = tmp0 + 2*tmp1 + 2*tmp2 + 4*tmp3; // c4f07 = 0000 0000 0000 1100 0100 1111 0000 0111 // 8efa0 = 0000 0000 0000 1000 1110 1111 1010 0000 bit0 = val & (mask << 0); bit1 = (val & (mask << 1)) >> 1; r0 = __ballot_sync(0xFFFFFFFF, bit0); r1 = __ballot_sync(0xFFFFFFFF, bit1); // if (block_tile_i == 64 && block_tile_j == 0 && warpId == 4 & i == 1 && laneId == 0){ // printf("laneId: %d, tmp0: %d, tmp1: %d, tmp2: %d, tmp3: %d, val: %d, r1: %x\n", laneId, tmp0, tmp1, tmp2, tmp3, val, __brev(r1)); // } if (laneId == 0) { size_t gmem_idx = block_tile_i*N_GLOBAL/32 + block_tile_j/32 + warpId*N_GLOBAL/32 + i*8*N_GLOBAL/32; D[gmem_idx] = __brev(r0); D[gmem_idx+M_GLOBAL*N_GLOBAL/32] = __brev(r1); } } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, 
int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i*K_GLOBAL+j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; int term[4]; term[0] = 0; term[1] = 0; term[2] = 0; term[3] = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; if (xb == 0 && wb == 0) { term[0] += x_val * w_val; } else if (xb == 0 && wb == 1) { term[1] += x_val * w_val; } else if (xb == 1 && wb == 0) { term[2] += x_val * w_val; } else { term[3] += x_val * w_val; } } } } } C_ref_before_decompose[m*N_GLOBAL+n]= tmp; // if (m == 76 && n < 32) { // if (m == 76 && n == 0) { // printf("n: %d, term[0]: %d, term[1]: %d, term[2]: %d, term[3]: %d, tmp: %d\n", n, term[0], term[1], term[2], term[3], tmp); // } } } // 652d45c9 = 0110 0101 0010 1101 0100 0101 1100 1001 // for(int i=0; i<32; i++) { // printf("i=%d: %d\n", i, C_ref_before_decompose[76*N_GLOBAL+i]); // } // printf("w0: %x %x %x %x\n", W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 0], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 1], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 2], W_int[0*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 3]); // printf("w1: %x %x %x %x\n", W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 0], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 1], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 2], W_int[1*M_GLOBAL*K_GLOBAL/32 + 76*K_GLOBAL/32 + 3]); // printf("x0: %d %d %d %d\n", X_int[wb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]) // int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; // int x_int = 
X_int[wb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*N_GLOBAL+n_tile*32+n]; tmp = (tmp - 0); // Can be modified for other quantized parameters. for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | (((mask<<b) & tmp) >> b); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile] = val[b]; } } } // printf("b=1, m=76, n_tile=0: %x\n", ref_C[1*M_GLOBAL*N_GLOBAL/32+76*N_GLOBAL/32+0]); } // 8efa0 = 0000 0000 0000 1000 1110 1111 1010 0000 // 652d45c9 = 0110 0101 0010 1101 0100 0101 1100 1001 void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %x, ref_C: %x\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int X_BIT = 2; int W_BIT = 2; for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) { // int M_GLOBAL = 128; int N_GLOBAL = M_GLOBAL; int K_GLOBAL = M_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * 
(K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( apmm_w2a2_decompose, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1000; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (apmm_w2a2_decompose<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V73, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL/32 * X_BIT); /* Copmpute reference matrix on CPU */ compute_ref_pack(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT, X_BIT); /* validation results */ validate_results_pack(Output_h, Output_ref, M_GLOBAL, N_GLOBAL, X_BIT); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(cudaFree(reinterpret_cast<void *>(W))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(X))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
187fea792479a0e8a34ad8d9d536c97c53608ad9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #define DATATYPE int #define SMEMSIZE 1024 #define REP 128 #define conflictnum 8 __global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its) { __shared__ DATATYPE smem1[SMEMSIZE]; __shared__ DATATYPE smem2[SMEMSIZE]; unsigned int tid=threadIdx.x; while(tid<SMEMSIZE) { smem1[tid]=in1[tid]; smem2[tid]=in2[tid]; tid+=blockDim.x; } // DATATYPE p,q=(threadIdx.x/conflictnum); DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum); double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=smem1[q]; q=smem2[p]; } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2) { int its=30; //int blocks=1,threads=32; DATATYPE *d_in1,*d_in2; hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); hipMalloc((void**)&d_time,sizeof(double)*blocks*threads); hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); hipLaunchKernelGGL(( shared_model_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in1,d_out,its); hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { 
mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt); hipFree(d_time); hipFree(d_out); hipFree(d_in1); hipFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } int main() { DATATYPE *h_in1; h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); init_order(h_in1,SMEMSIZE); /* for (int i=0;i<SMEMSIZE;i+=32) { for (int j=0;j<32;j++) { printf("%d\t",h_in3[i+j]); } printf("\n"); } */ printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); //main_test(1,32,h_in1,h_in1,1); //main_test(1,32,h_in2,h_in2,2); //main_test(1,32,h_in3,h_in3,3); //main_test(1,512,h_in1,h_in1,1); //main_test(1,512,h_in2,h_in2,2); //main_test(1,512,h_in3,h_in3,3); for (int i=0;i<=1;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=0;j<=512;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1); } } /* for (int i=0;i<=1024;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=256;j<=256;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1); } } */ free(h_in1); return 0; }
187fea792479a0e8a34ad8d9d536c97c53608ad9.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #define DATATYPE int #define SMEMSIZE 1024 #define REP 128 #define conflictnum 8 __global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its) { __shared__ DATATYPE smem1[SMEMSIZE]; __shared__ DATATYPE smem2[SMEMSIZE]; unsigned int tid=threadIdx.x; while(tid<SMEMSIZE) { smem1[tid]=in1[tid]; smem2[tid]=in2[tid]; tid+=blockDim.x; } // DATATYPE p,q=(threadIdx.x/conflictnum); DATATYPE p,q=(threadIdx.x/conflictnum*conflictnum); double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=smem1[q]; q=smem2[p]; } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2) { int its=30; //int blocks=1,threads=32; DATATYPE *d_in1,*d_in2; cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads); cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); shared_model_1<<<blocks,threads>>>(d_time,d_in1,d_in1,d_out,its); cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t\t%f\t%f\t%f\n", 
blocks,threads,avert,mint,maxt); cudaFree(d_time); cudaFree(d_out); cudaFree(d_in1); cudaFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } int main() { DATATYPE *h_in1; h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); init_order(h_in1,SMEMSIZE); /* for (int i=0;i<SMEMSIZE;i+=32) { for (int j=0;j<32;j++) { printf("%d\t",h_in3[i+j]); } printf("\n"); } */ printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); //main_test(1,32,h_in1,h_in1,1); //main_test(1,32,h_in2,h_in2,2); //main_test(1,32,h_in3,h_in3,3); //main_test(1,512,h_in1,h_in1,1); //main_test(1,512,h_in2,h_in2,2); //main_test(1,512,h_in3,h_in3,3); for (int i=0;i<=1;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=0;j<=512;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1); } } /* for (int i=0;i<=1024;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=256;j<=256;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1); } } */ free(h_in1); return 0; }
814d741325b65eb39390ac95a971818bf635ca66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_initialise_chunk_kernel_zz [2][2]; static int dims_initialise_chunk_kernel_zz_h [2][2] = {0}; //user function __device__ void initialise_chunk_kernel_zz_gpu(ACC<int> &zz, int *idx) { zz(0,0,0) = idx[2]-2; } __global__ void ops_initialise_chunk_kernel_zz( int* __restrict arg0, int arg_idx0, int arg_idx1, int arg_idx2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[3]; arg_idx[0] = arg_idx0+idx_x; arg_idx[1] = arg_idx1+idx_y; arg_idx[2] = arg_idx2+idx_z; arg0 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_zz[0][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_zz[0][0] * dims_initialise_chunk_kernel_zz[0][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<int> argp0(dims_initialise_chunk_kernel_zz[0][0], dims_initialise_chunk_kernel_zz[0][1], arg0); initialise_chunk_kernel_zz_gpu(argp0, arg_idx); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_initialise_chunk_kernel_zz_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"initialise_chunk_kernel_zz"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif 
//OPS_MPI int arg_idx[3]; #ifdef OPS_MPI if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; arg_idx[n] = start[n]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != dims_initialise_chunk_kernel_zz_h[0][0] || ydim0 != dims_initialise_chunk_kernel_zz_h[0][1]) { dims_initialise_chunk_kernel_zz_h[0][0] = xdim0; dims_initialise_chunk_kernel_zz_h[0][1] = ydim0; cutilSafeCall(hipMemcpyToSymbol( dims_initialise_chunk_kernel_zz, dims_initialise_chunk_kernel_zz_h, sizeof(dims_initialise_chunk_kernel_zz))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zz), dim3(grid), dim3(tblock) , 0, 0, (int *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY 
ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_initialise_chunk_kernel_zz_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"initialise_chunk_kernel_zz"); } ops_enqueue_kernel(desc); } #endif
814d741325b65eb39390ac95a971818bf635ca66.cu
// // auto-generated by ops.py // __constant__ int dims_initialise_chunk_kernel_zz [2][2]; static int dims_initialise_chunk_kernel_zz_h [2][2] = {0}; //user function __device__ void initialise_chunk_kernel_zz_gpu(ACC<int> &zz, int *idx) { zz(0,0,0) = idx[2]-2; } __global__ void ops_initialise_chunk_kernel_zz( int* __restrict arg0, int arg_idx0, int arg_idx1, int arg_idx2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int arg_idx[3]; arg_idx[0] = arg_idx0+idx_x; arg_idx[1] = arg_idx1+idx_y; arg_idx[2] = arg_idx2+idx_z; arg0 += idx_x * 0*1 + idx_y * 0*1 * dims_initialise_chunk_kernel_zz[0][0] + idx_z * 1*1 * dims_initialise_chunk_kernel_zz[0][0] * dims_initialise_chunk_kernel_zz[0][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<int> argp0(dims_initialise_chunk_kernel_zz[0][0], dims_initialise_chunk_kernel_zz[0][1], arg0); initialise_chunk_kernel_zz_gpu(argp0, arg_idx); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_initialise_chunk_kernel_zz_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"initialise_chunk_kernel_zz"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI int arg_idx[3]; #ifdef OPS_MPI if (compute_ranges(args, 2,block, range, start, 
end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; arg_idx[n] = start[n]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != dims_initialise_chunk_kernel_zz_h[0][0] || ydim0 != dims_initialise_chunk_kernel_zz_h[0][1]) { dims_initialise_chunk_kernel_zz_h[0][0] = xdim0; dims_initialise_chunk_kernel_zz_h[0][1] = ydim0; cutilSafeCall(cudaMemcpyToSymbol( dims_initialise_chunk_kernel_zz, dims_initialise_chunk_kernel_zz_h, sizeof(dims_initialise_chunk_kernel_zz))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_initialise_chunk_kernel_zz<<<grid, tblock >>> ( (int *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record 
ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_initialise_chunk_kernel_zz_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"initialise_chunk_kernel_zz"); } ops_enqueue_kernel(desc); } #endif
959e0bb3f0a8949736b854238115cddc2939eee7.hip
// !!! This is a file automatically generated by hipify!!! // info.cu #include <hip/hip_runtime.h> #include <iostream> int main() { int count; hipDeviceProp_t prop; hipGetDeviceCount(&count); std::cout << "devices count : " << count << std::endl; hipGetDeviceProperties(&prop, 0); std::cout << "max block size : " << prop.maxGridSize[0] << ' ' << prop.maxGridSize[1] << ' ' << prop.maxGridSize[2] << std::endl; std::cout << "max thread size : " << prop.maxThreadsDim[0] << ' ' << prop.maxThreadsDim[1] << ' ' << prop.maxThreadsDim[2] << std::endl; return EXIT_SUCCESS; }
959e0bb3f0a8949736b854238115cddc2939eee7.cu
// info.cu #include <cuda_runtime.h> #include <iostream> int main() { int count; cudaDeviceProp prop; cudaGetDeviceCount(&count); std::cout << "devices count : " << count << std::endl; cudaGetDeviceProperties(&prop, 0); std::cout << "max block size : " << prop.maxGridSize[0] << ' ' << prop.maxGridSize[1] << ' ' << prop.maxGridSize[2] << std::endl; std::cout << "max thread size : " << prop.maxThreadsDim[0] << ' ' << prop.maxThreadsDim[1] << ' ' << prop.maxThreadsDim[2] << std::endl; return EXIT_SUCCESS; }
3b5b6be405a57fe5b68c9e46afc253dbab776aa1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2010 Ruslan Kudubayev. */ /* * Device code. */ #ifndef _TSP_ANTCOLONY_KERNEL_H_ #define _TSP_ANTCOLONY_KERNEL_H_ #include <stdio.h> #include "tsp_antcolony.h" //////////////////////////////////////////////////////////////////////////////// // colonisation //////////////////////////////////////////////////////////////////////////////// __global__ void colonise(float* C, int* A, float* Rand, int* d_best, int* d_path, const int n, const float R, const float tau0) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int antid = ((bx+by)*BLOCK_SIZE+(tx+ty)); //calculate the offset in the Rand array int offset = antid*(2*n-1); float ra = Rand[offset + 2*n-2]; int startNode = ((int)(ra*n))%n; int curNode = startNode; int visited[WA]; for (int i=0; i<n; ++i) visited[i] = -1; visited[startNode] = 0; int cost = 0; int collected; // in a while loop do movements according to the matrix updating tau for (collected=1; collected<n; ++collected) { int nNode = -1; // get random ra = Rand[offset + collected*2 - 2]; //exploit or explore if (ra > 0.2f) { //exploit the paths, get the max one simply float max = -1.0f; int first = 1; for (int i=0; i<n; ++i) if (visited[i]==-1 && i!=curNode) { float eeta, phe; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); phe = C[curNode*n+i] * eeta; if (first || phe > max) { max = phe; nNode = i; first = 0; } } } else { // explore properly float sum = 0.0f; float sump = 0.0f; // calculate the sum for (int i=0; i<n; ++i) if (visited[i] == -1 && i!=curNode) { // take care of the zero divisions... 
float eeta; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); sum += C[curNode*n+i] * eeta; } //generate a random number ra = Rand[offset + collected*2 - 1]; float target = ra * sum; // precalculate this for the p formula division // calculate the probability and jump if that probability occurs this time. for (int i=0; i<n; ++i) if (visited[i] == -1 && i!=curNode) { // calculate the probability as per the equation before. float p; float eeta; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); // p calculated here with squaring eeta for better results p = (C[curNode*n+i] * eeta); if (target>sump && target<=p+sump) { // yes. move. nNode = i; break; } nNode = i; sump += p; } } if (nNode >= 0) { // accept the next node cost = cost + A[curNode*n+nNode]; visited[curNode] = nNode; // apply local updating rule right now. C[curNode*n+nNode] = (1.0f - R) * C[curNode*n+nNode] + tau0; // move on curNode = nNode; } else { // don't really prefer to go there // this means that an ant has arrived // into some deadlock where it is best to die than // lead anyone else here. break; } } if (collected == n) { cost = cost + A[curNode*n+startNode]; visited[curNode] = startNode; C[curNode*n+startNode] = (1.0f - R) * C[curNode*n+startNode] + tau0; // after done, compare the path with the best achieved so far if (cost < *d_best) { *d_best = cost; for (int i=0; i<n; ++i) d_path[i] = visited[i]; } } } //////////////////////////////////////////////////////////////////////////////// // global update //////////////////////////////////////////////////////////////////////////////// __global__ void update_pheromones(float* C, int* d_best, int* d_path, const int n, const float A) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int i = (bx * BLOCK_SIDE_UPDATER) + tx; int j = (by * BLOCK_SIDE_UPDATER) + ty; if (i<n && j<n) { // update feromones. 
float deposition = 0.0f; float evaporation = C[i*n+j] * (1.0f - A); if (d_path[i] == j) { deposition = A/(*d_best); } C[i*n+j] = evaporation+deposition; } } #endif // #ifndef _TSP_ANTCOLONY_KERNEL_H_
3b5b6be405a57fe5b68c9e46afc253dbab776aa1.cu
/* * Copyright 2010 Ruslan Kudubayev. */ /* * Device code. */ #ifndef _TSP_ANTCOLONY_KERNEL_H_ #define _TSP_ANTCOLONY_KERNEL_H_ #include <stdio.h> #include "tsp_antcolony.h" //////////////////////////////////////////////////////////////////////////////// // colonisation //////////////////////////////////////////////////////////////////////////////// __global__ void colonise(float* C, int* A, float* Rand, int* d_best, int* d_path, const int n, const float R, const float tau0) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int antid = ((bx+by)*BLOCK_SIZE+(tx+ty)); //calculate the offset in the Rand array int offset = antid*(2*n-1); float ra = Rand[offset + 2*n-2]; int startNode = ((int)(ra*n))%n; int curNode = startNode; int visited[WA]; for (int i=0; i<n; ++i) visited[i] = -1; visited[startNode] = 0; int cost = 0; int collected; // in a while loop do movements according to the matrix updating tau for (collected=1; collected<n; ++collected) { int nNode = -1; // get random ra = Rand[offset + collected*2 - 2]; //exploit or explore if (ra > 0.2f) { //exploit the paths, get the max one simply float max = -1.0f; int first = 1; for (int i=0; i<n; ++i) if (visited[i]==-1 && i!=curNode) { float eeta, phe; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); phe = C[curNode*n+i] * eeta; if (first || phe > max) { max = phe; nNode = i; first = 0; } } } else { // explore properly float sum = 0.0f; float sump = 0.0f; // calculate the sum for (int i=0; i<n; ++i) if (visited[i] == -1 && i!=curNode) { // take care of the zero divisions... float eeta; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); sum += C[curNode*n+i] * eeta; } //generate a random number ra = Rand[offset + collected*2 - 1]; float target = ra * sum; // precalculate this for the p formula division // calculate the probability and jump if that probability occurs this time. 
for (int i=0; i<n; ++i) if (visited[i] == -1 && i!=curNode) { // calculate the probability as per the equation before. float p; float eeta; if (A[curNode*n+i]==0) eeta = 1.1f; else eeta = (1.0f/A[curNode*n+i]); // p calculated here with squaring eeta for better results p = (C[curNode*n+i] * eeta); if (target>sump && target<=p+sump) { // yes. move. nNode = i; break; } nNode = i; sump += p; } } if (nNode >= 0) { // accept the next node cost = cost + A[curNode*n+nNode]; visited[curNode] = nNode; // apply local updating rule right now. C[curNode*n+nNode] = (1.0f - R) * C[curNode*n+nNode] + tau0; // move on curNode = nNode; } else { // don't really prefer to go there // this means that an ant has arrived // into some deadlock where it is best to die than // lead anyone else here. break; } } if (collected == n) { cost = cost + A[curNode*n+startNode]; visited[curNode] = startNode; C[curNode*n+startNode] = (1.0f - R) * C[curNode*n+startNode] + tau0; // after done, compare the path with the best achieved so far if (cost < *d_best) { *d_best = cost; for (int i=0; i<n; ++i) d_path[i] = visited[i]; } } } //////////////////////////////////////////////////////////////////////////////// // global update //////////////////////////////////////////////////////////////////////////////// __global__ void update_pheromones(float* C, int* d_best, int* d_path, const int n, const float A) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int i = (bx * BLOCK_SIDE_UPDATER) + tx; int j = (by * BLOCK_SIDE_UPDATER) + ty; if (i<n && j<n) { // update feromones. float deposition = 0.0f; float evaporation = C[i*n+j] * (1.0f - A); if (d_path[i] == j) { deposition = A/(*d_best); } C[i*n+j] = evaporation+deposition; } } #endif // #ifndef _TSP_ANTCOLONY_KERNEL_H_
992868bdf32bbd6a4d96f713c0d89bcf2c935ef8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <nvfunctional> #include <cstdio> #include <cassert> #include <chrono> #include <ftk/numeric/inverse_linear_interpolation_solver.hh> #include <ftk/numeric/linear_interpolation.hh> #include <ftk/numeric/clamp.hh> #include <ftk/numeric/symmetric_matrix.hh> #include <ftk/numeric/fixed_point.hh> #include <ftk/numeric/critical_point_type.hh> #include <ftk/numeric/critical_point_test.hh> #include <ftk/mesh/lattice.hh> // #include <ftk/filters/critical_point_lite.hh> #include "common_hip.cuh" using namespace ftk; template <int scope> __device__ __host__ bool check_simplex_contour_3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, // array dimension const element41_t& e, double threshold, const double *F[2], cp_t &p) { if (e.corner[3] != current_timestep) return false; int vertices[2][4], indices[2]; size_t local_indices[2]; for (int i = 0; i < 2; i ++) { for (int j = 0; j < 4; j ++) { vertices[i][j] = e.corner[j] + unit_simplex_offset_4_1<scope>(e.type, i, j); if (vertices[i][j] < domain.st[j] || vertices[i][j] > domain.st[j] + domain.sz[j] - 1) return false; } indices[i] = domain.to_index(vertices[i]); local_indices[i] = ext.to_index(vertices[i]); } const long long factor = 2 << 20; // using fixed point rep double X[2][4]; double f[2]; long long fi[2]; for (int i = 0; i < 2; i ++) { const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]); const size_t t = unit_simplex_offset_4_1<scope>(e.type, i, 3); f[i] = F[t][k] - threshold; fi[i] = f[i] * factor; for (int j = 0; j < 4; j ++) X[i][j] = vertices[i][j]; } bool succ = robust_critical_point_in_simplex1(fi, indices); if (!succ) return false; double mu[2]; bool succ2 = inverse_lerp_s1v1(f, mu); double x[4]; lerp_s1v4(X, mu, x); p.x[0] = x[0]; p.x[1] = x[1]; p.x[2] = x[2]; p.t = x[3]; return true; } template <int scope> __global__ void sweep_simplices( int 
current_timestep, const lattice4_t domain, const lattice4_t core, const lattice3_t ext, // array dimension double threshold, const double *F_c, // current timestep const double *F_n, // next timestep unsigned long long &ncps, cp_t *cps) { const double *F[2] = {F_c, F_n}; int tid = getGlobalIdx_3D_1D(); const element41_t e = element41_from_index<scope>(core, tid); cp_t cp; bool succ = check_simplex_contour_3dt<scope>( current_timestep, domain, core, ext, e, threshold, F, cp); if (succ) { unsigned long long i = atomicAdd(&ncps, 1ul); cp.tag = tid; cps[i] = cp; } } template <int scope> static std::vector<cp_t> extract_contour_3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, double threshold, const double *F_c, const double *F_n) { auto t0 = std::chrono::high_resolution_clock::now(); const size_t ntasks = core.n() * ntypes_4_1<scope>(); // fprintf(stderr, "ntasks=%zu\n", ntasks); const int maxGridDim = 1024; const int blockSize = 256; const int nBlocks = idivup(ntasks, blockSize); dim3 gridSize; if (nBlocks >= maxGridDim) gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim); else gridSize = dim3(nBlocks); double *dF_c = NULL, *dF_n = NULL; if (F_c) { hipMalloc((void**)&dF_c, sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dF_c"); hipMemcpy(dF_c, F_c, sizeof(double) * ext.n(), hipMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_c"); } if (F_n) { hipMalloc((void**)&dF_n, sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_l"); hipMemcpy(dF_n, F_n, sizeof(double) * ext.n(), hipMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_l"); } unsigned long long *dncps; // number of cps hipMalloc((void**)&dncps, sizeof(unsigned long long)); hipMemset(dncps, 0, sizeof(unsigned long long)); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps"); 
cp_t *dcps; hipMalloc((void**)&dcps, sizeof(cp_t) * core.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps"); hipDeviceSynchronize(); fprintf(stderr, "calling kernel func...\n"); hipLaunchKernelGGL(( sweep_simplices<scope>), dim3(gridSize), dim3(blockSize), 0, 0, current_timestep, domain, core, ext, threshold, dF_c, dF_n, *dncps, dcps); hipDeviceSynchronize(); checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function"); unsigned long long ncps = 0; hipMemcpy(&ncps, dncps, sizeof(unsigned long long), hipMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost, dncps"); fprintf(stderr, "ncps=%llu\n", ncps); std::vector<cp_t> cps(ncps); hipMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, hipMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost"); if (dF_c) hipFree(dF_c); if (dF_n) hipFree(dF_n); hipFree(dncps); hipFree(dcps); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipFree"); hipDeviceSynchronize(); auto t1 = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9; fprintf(stderr, "exitting gpu kernel, ncps=%llu, time=%f\n", ncps, duration); return cps; } std::vector<cp_t> extract_contour_3dt_cuda( int scope, int current_timestep, const ftk::lattice& domain, const ftk::lattice& core, const ftk::lattice& ext, double threshold, const double *F_c, const double *F_n) { lattice4_t D(domain); lattice4_t C(core); lattice3_t E(ext); if (scope == scope_interval) return extract_contour_3dt<scope_interval>(current_timestep, D, C, E, threshold, F_c, F_n); else return extract_contour_3dt<scope_ordinal>(current_timestep, D, C, E, threshold, F_c, F_n); }
992868bdf32bbd6a4d96f713c0d89bcf2c935ef8.cu
#include <nvfunctional> #include <cstdio> #include <cassert> #include <chrono> #include <ftk/numeric/inverse_linear_interpolation_solver.hh> #include <ftk/numeric/linear_interpolation.hh> #include <ftk/numeric/clamp.hh> #include <ftk/numeric/symmetric_matrix.hh> #include <ftk/numeric/fixed_point.hh> #include <ftk/numeric/critical_point_type.hh> #include <ftk/numeric/critical_point_test.hh> #include <ftk/mesh/lattice.hh> // #include <ftk/filters/critical_point_lite.hh> #include "common.cuh" using namespace ftk; template <int scope> __device__ __host__ bool check_simplex_contour_3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, // array dimension const element41_t& e, double threshold, const double *F[2], cp_t &p) { if (e.corner[3] != current_timestep) return false; int vertices[2][4], indices[2]; size_t local_indices[2]; for (int i = 0; i < 2; i ++) { for (int j = 0; j < 4; j ++) { vertices[i][j] = e.corner[j] + unit_simplex_offset_4_1<scope>(e.type, i, j); if (vertices[i][j] < domain.st[j] || vertices[i][j] > domain.st[j] + domain.sz[j] - 1) return false; } indices[i] = domain.to_index(vertices[i]); local_indices[i] = ext.to_index(vertices[i]); } const long long factor = 2 << 20; // using fixed point rep double X[2][4]; double f[2]; long long fi[2]; for (int i = 0; i < 2; i ++) { const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]); const size_t t = unit_simplex_offset_4_1<scope>(e.type, i, 3); f[i] = F[t][k] - threshold; fi[i] = f[i] * factor; for (int j = 0; j < 4; j ++) X[i][j] = vertices[i][j]; } bool succ = robust_critical_point_in_simplex1(fi, indices); if (!succ) return false; double mu[2]; bool succ2 = inverse_lerp_s1v1(f, mu); double x[4]; lerp_s1v4(X, mu, x); p.x[0] = x[0]; p.x[1] = x[1]; p.x[2] = x[2]; p.t = x[3]; return true; } template <int scope> __global__ void sweep_simplices( int current_timestep, const lattice4_t domain, const lattice4_t core, const lattice3_t ext, // array dimension 
double threshold, const double *F_c, // current timestep const double *F_n, // next timestep unsigned long long &ncps, cp_t *cps) { const double *F[2] = {F_c, F_n}; int tid = getGlobalIdx_3D_1D(); const element41_t e = element41_from_index<scope>(core, tid); cp_t cp; bool succ = check_simplex_contour_3dt<scope>( current_timestep, domain, core, ext, e, threshold, F, cp); if (succ) { unsigned long long i = atomicAdd(&ncps, 1ul); cp.tag = tid; cps[i] = cp; } } template <int scope> static std::vector<cp_t> extract_contour_3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, double threshold, const double *F_c, const double *F_n) { auto t0 = std::chrono::high_resolution_clock::now(); const size_t ntasks = core.n() * ntypes_4_1<scope>(); // fprintf(stderr, "ntasks=%zu\n", ntasks); const int maxGridDim = 1024; const int blockSize = 256; const int nBlocks = idivup(ntasks, blockSize); dim3 gridSize; if (nBlocks >= maxGridDim) gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim); else gridSize = dim3(nBlocks); double *dF_c = NULL, *dF_n = NULL; if (F_c) { cudaMalloc((void**)&dF_c, sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dF_c"); cudaMemcpy(dF_c, F_c, sizeof(double) * ext.n(), cudaMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_c"); } if (F_n) { cudaMalloc((void**)&dF_n, sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating drho_l"); cudaMemcpy(dF_n, F_n, sizeof(double) * ext.n(), cudaMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying drho_l"); } unsigned long long *dncps; // number of cps cudaMalloc((void**)&dncps, sizeof(unsigned long long)); cudaMemset(dncps, 0, sizeof(unsigned long long)); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps"); cp_t *dcps; cudaMalloc((void**)&dcps, sizeof(cp_t) * core.n()); checkLastCudaError("[FTK-CUDA] 
error: sweep_simplices: allocating dcps"); cudaDeviceSynchronize(); fprintf(stderr, "calling kernel func...\n"); sweep_simplices<scope><<<gridSize, blockSize>>>( current_timestep, domain, core, ext, threshold, dF_c, dF_n, *dncps, dcps); cudaDeviceSynchronize(); checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function"); unsigned long long ncps = 0; cudaMemcpy(&ncps, dncps, sizeof(unsigned long long), cudaMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost, dncps"); fprintf(stderr, "ncps=%llu\n", ncps); std::vector<cp_t> cps(ncps); cudaMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, cudaMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost"); if (dF_c) cudaFree(dF_c); if (dF_n) cudaFree(dF_n); cudaFree(dncps); cudaFree(dcps); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaFree"); cudaDeviceSynchronize(); auto t1 = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9; fprintf(stderr, "exitting gpu kernel, ncps=%llu, time=%f\n", ncps, duration); return cps; } std::vector<cp_t> extract_contour_3dt_cuda( int scope, int current_timestep, const ftk::lattice& domain, const ftk::lattice& core, const ftk::lattice& ext, double threshold, const double *F_c, const double *F_n) { lattice4_t D(domain); lattice4_t C(core); lattice3_t E(ext); if (scope == scope_interval) return extract_contour_3dt<scope_interval>(current_timestep, D, C, E, threshold, F_c, F_n); else return extract_contour_3dt<scope_ordinal>(current_timestep, D, C, E, threshold, F_c, F_n); }
b8153960b5da6b8b8df55122347d55212b09e2be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <helper_math.h> #include <nvMatrix.h> // #include <nvVector.h> __device__ inline float return_one() {{ return 1.0f; }} extern "C" __global__ void MatrixMulKernel(float *A, float *C) {{ // Block index const uint bx = blockIdx.x; const uint by = blockIdx.y; const uint bw = blockDim.x; const uint bh = blockDim.y; const uint gw = gridDim.x; const uint width = gw*bw; // Thread index const uint tx = threadIdx.x; const uint ty = threadIdx.y; // Stride access locations const uint aBegin = bh*width*by + bw*bx; // Make sure we have an accumulator __shared__ float accumulator; if (tx == 0 & ty == 0) accumulator = 0.0; // Grab a value from global memory float this_val = A[aBegin + width*ty + tx]; // Add our value to the accumulator atomicAdd(&accumulator, this_val); float one = return_one(); atomicAdd(&accumulator, one); __syncthreads(); matrix4<float> am(4.0); float qqq = am.element(1,1); printf("I think some element is: %f\n", qqq); // Store out the accumulated value into our global array C[bx + by*gw] = accumulator; }}
b8153960b5da6b8b8df55122347d55212b09e2be.cu
#include <stdio.h> #include <helper_math.h> #include <nvMatrix.h> // #include <nvVector.h> __device__ inline float return_one() {{ return 1.0f; }} extern "C" __global__ void MatrixMulKernel(float *A, float *C) {{ // Block index const uint bx = blockIdx.x; const uint by = blockIdx.y; const uint bw = blockDim.x; const uint bh = blockDim.y; const uint gw = gridDim.x; const uint width = gw*bw; // Thread index const uint tx = threadIdx.x; const uint ty = threadIdx.y; // Stride access locations const uint aBegin = bh*width*by + bw*bx; // Make sure we have an accumulator __shared__ float accumulator; if (tx == 0 & ty == 0) accumulator = 0.0; // Grab a value from global memory float this_val = A[aBegin + width*ty + tx]; // Add our value to the accumulator atomicAdd(&accumulator, this_val); float one = return_one(); atomicAdd(&accumulator, one); __syncthreads(); matrix4<float> am(4.0); float qqq = am.element(1,1); printf("I think some element is: %f\n", qqq); // Store out the accumulated value into our global array C[bx + by*gw] = accumulator; }}
8b4dad22ccdd2a8676b79f5204b580964756438b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <torch/library.h> #include <ATen/native/hip/KernelUtils.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { template <typename T> __device__ T bilinear_interpolate( const T* input, int height, int width, T y, T x, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void ps_roi_align_forward_kernel_impl( int nthreads, const T* input, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, const T* rois, int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) 
* pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( int height, int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; } template <typename T> __global__ void ps_roi_align_backward_kernel_impl( int nthreads, const T* grad_output, const int* channel_mapping, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, int channels_out, T* grad_input, const T* rois, const int memory_span) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = channel_mapping[index]; // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const int offset = (roi_batch_ind * channels + c_in) * height * width; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { at::native::fastAtomicAdd( grad_input, offset + y_low * width + x_low, memory_span, static_cast<T>(g1), true); at::native::fastAtomicAdd( grad_input, offset + y_low * width + x_high, memory_span, static_cast<T>(g2), true); at::native::fastAtomicAdd( grad_input, offset + y_high * width + x_low, memory_span, static_cast<T>(g3), true); at::native::fastAtomicAdd( grad_input, offset + y_high * width + x_high, memory_span, static_cast<T>(g4), true); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( const at::Tensor& input, const at::Tensor& rois, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ps_roi_align_forward_kernel"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); 
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, channel_mapping); } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "ps_roi_align_forward_kernel", [&] { hipLaunchKernelGGL(( ps_roi_align_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(hipGetLastError()); hipDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor ps_roi_align_backward_kernel( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio, int64_t batch_size, int64_t channels, int64_t height, int64_t width) { // Check if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( 
channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "ps_roi_align_backward_kernel"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); at::globalContext().alertNotDeterministic("ps_roi_align_backward_kernel"); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { hipLaunchKernelGGL(( ps_roi_align_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>(), grad_input.numel()); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), TORCH_FN(ps_roi_align_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), TORCH_FN(ps_roi_align_backward_kernel)); } } // namespace ops } // namespace vision
8b4dad22ccdd2a8676b79f5204b580964756438b.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <torch/library.h> #include <ATen/native/cuda/KernelUtils.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { template <typename T> __device__ T bilinear_interpolate( const T* input, int height, int width, T y, T x, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void ps_roi_align_forward_kernel_impl( int nthreads, const T* input, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, const T* rois, int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int 
roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( int height, int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = 
w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; } template <typename T> __global__ void ps_roi_align_backward_kernel_impl( int nthreads, const T* grad_output, const int* channel_mapping, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, int channels_out, T* grad_input, const T* rois, const int memory_span) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = channel_mapping[index]; // Do not using floor/ceil; this implementation detail is critical T 
hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const int offset = (roi_batch_ind * channels + c_in) * height * width; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { at::native::fastAtomicAdd( grad_input, offset + y_low * width + x_low, memory_span, static_cast<T>(g1), true); at::native::fastAtomicAdd( grad_input, offset + y_low * width + x_high, memory_span, static_cast<T>(g2), true); at::native::fastAtomicAdd( grad_input, offset + y_high * width + x_low, memory_span, static_cast<T>(g3), true); at::native::fastAtomicAdd( grad_input, offset + y_high * width + x_high, memory_span, static_cast<T>(g4), true); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( const at::Tensor& input, const at::Tensor& rois, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); 
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ps_roi_align_forward_kernel"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, channel_mapping); } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "ps_roi_align_forward_kernel", [&] { ps_roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); cudaDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor ps_roi_align_backward_kernel( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, double spatial_scale, int64_t pooled_height, int64_t pooled_width, 
int64_t sampling_ratio, int64_t batch_size, int64_t channels, int64_t height, int64_t width) { // Check if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "ps_roi_align_backward_kernel"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); at::globalContext().alertNotDeterministic("ps_roi_align_backward_kernel"); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { ps_roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>(), grad_input.numel()); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), TORCH_FN(ps_roi_align_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), 
TORCH_FN(ps_roi_align_backward_kernel)); } } // namespace ops } // namespace vision
411f809015b72c96e3dbfed0309971bd7beeb522.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "compress_impl.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> namespace onnxruntime { namespace cuda { void PrefixSumImpl(const int8_t* condition_data, int32_t* condition_cumulative_sum, const size_t length) { thrust::inclusive_scan(thrust::device, condition_data, condition_data + length, condition_cumulative_sum); } template <typename T> __global__ void _CompressKernel(const int32_t valid_condition_length, const fast_divmod axis_right_stride_div, const fast_divmod input_axis_included_stride_div, const int32_t output_axis_included_stride, const int32_t* condition_cumulative_sum, const bool* condition_data, const T* input_data, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_index = 0; int div, mod; input_axis_included_stride_div.divmod(id, div, mod); output_index = output_axis_included_stride * div; axis_right_stride_div.divmod(mod, div, mod); if (div < valid_condition_length && condition_data[div]) { output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod; output_data[output_index] = input_data[id]; } } Status CompressImpl(const size_t element_bytes, const int32_t valid_condition_length, const int32_t axis_right_stride, const int32_t input_axis_dim_length, const int32_t output_axis_dim_length, const int32_t* condition_cumulative_sum, const bool* condition_data, const void* input_data, void* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod axis_right_stride_div(axis_right_stride); fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length); int output_axis_included_stride = 
axis_right_stride * output_axis_dim_length; switch (element_bytes) { case sizeof(int8_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int16_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int32_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int64_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data), (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
411f809015b72c96e3dbfed0309971bd7beeb522.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "compress_impl.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> namespace onnxruntime { namespace cuda { void PrefixSumImpl(const int8_t* condition_data, int32_t* condition_cumulative_sum, const size_t length) { thrust::inclusive_scan(thrust::device, condition_data, condition_data + length, condition_cumulative_sum); } template <typename T> __global__ void _CompressKernel(const int32_t valid_condition_length, const fast_divmod axis_right_stride_div, const fast_divmod input_axis_included_stride_div, const int32_t output_axis_included_stride, const int32_t* condition_cumulative_sum, const bool* condition_data, const T* input_data, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_index = 0; int div, mod; input_axis_included_stride_div.divmod(id, div, mod); output_index = output_axis_included_stride * div; axis_right_stride_div.divmod(mod, div, mod); if (div < valid_condition_length && condition_data[div]) { output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod; output_data[output_index] = input_data[id]; } } Status CompressImpl(const size_t element_bytes, const int32_t valid_condition_length, const int32_t axis_right_stride, const int32_t input_axis_dim_length, const int32_t output_axis_dim_length, const int32_t* condition_cumulative_sum, const bool* condition_data, const void* input_data, void* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod axis_right_stride_div(axis_right_stride); fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length); int output_axis_included_stride = axis_right_stride * output_axis_dim_length; switch (element_bytes) { case sizeof(int8_t): 
_CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int16_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int32_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int64_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data), (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
9c304dd191d71266a67c1e17c145dfc9866ccbea.hip
// !!! This is a file automatically generated by hipify!!! /** * syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "../../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 2048 #define M 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_gpu, DATA_TYPE *B_gpu, DATA_TYPE *C_gpu) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i*N + j] = ((DATA_TYPE) i*j + 2) / N; C_gpu[i*N + j] = ((DATA_TYPE) i*j + 2) / N; } for (j = 0; j < M; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; B[i*N + j] = ((DATA_TYPE) i*j + 1) / N; A_gpu[i*N + j] = ((DATA_TYPE) i*j) / N; B_gpu[i*N + j] = ((DATA_TYPE) i*j + 1) / N; } } } void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i*N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i*N + j] += ALPHA * A[i*M + k] * B[j*M + k]; C[i*N + j] += ALPHA * B[i*M + k] * A[j*M + k]; } } } } void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu) { int i,j,fail; fail = 0; // Compare C with D for (i=0; i<N; i++) { for (j=0; j<N; j++) { if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } 
} } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < N) && (j < N)) { c[i * N + j] *= BETA; int k; for(k = 0; k < M; k++) { c[i * N + j] += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k]; } } } void syr2kCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) ))); t_start = rtclock(); hipLaunchKernelGGL(( syr2k_kernel), dim3(grid),dim3(block), 0, 0, A_gpu,B_gpu,C_gpu); hipDeviceSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main() { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); hipMallocManaged(&A_gpu, sizeof(DATA_TYPE) * N * M); hipMallocManaged(&B_gpu, sizeof(DATA_TYPE) * N * M); hipMallocManaged(&C_gpu, sizeof(DATA_TYPE) * N * N); init_arrays(A, B, C,A_gpu, B_gpu, C_gpu ); GPU_argv_init(); syr2kCuda(A_gpu, B_gpu, C_gpu); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_gpu); free(A); free(B); free(C); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); return 0; }
9c304dd191d71266a67c1e17c145dfc9866ccbea.cu
/** * syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #include "../../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 2048 #define M 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_gpu, DATA_TYPE *B_gpu, DATA_TYPE *C_gpu) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i*N + j] = ((DATA_TYPE) i*j + 2) / N; C_gpu[i*N + j] = ((DATA_TYPE) i*j + 2) / N; } for (j = 0; j < M; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; B[i*N + j] = ((DATA_TYPE) i*j + 1) / N; A_gpu[i*N + j] = ((DATA_TYPE) i*j) / N; B_gpu[i*N + j] = ((DATA_TYPE) i*j + 1) / N; } } } void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i*N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i*N + j] += ALPHA * A[i*M + k] * B[j*M + k]; C[i*N + j] += ALPHA * B[i*M + k] * A[j*M + k]; } } } } void compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu) { int i,j,fail; fail = 0; // Compare C with D for (i=0; i<N; i++) { for (j=0; j<N; j++) { if (percentDiff(C[i*N + j], C_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond 
Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void syr2k_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < N) && (j < N)) { c[i * N + j] *= BETA; int k; for(k = 0; k < M; k++) { c[i * N + j] += ALPHA * a[i * M + k] * b[j * M + k] + ALPHA * b[i * M + k] * a[j * M + k]; } } } void syr2kCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_X) ), (size_t)(ceil( ((float)N) / ((float)DIM_THREAD_BLOCK_Y) ))); t_start = rtclock(); syr2k_kernel<<<grid,block>>>(A_gpu,B_gpu,C_gpu); cudaDeviceSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main() { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE)); cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * N * M); cudaMallocManaged(&B_gpu, sizeof(DATA_TYPE) * N * M); cudaMallocManaged(&C_gpu, sizeof(DATA_TYPE) * N * N); init_arrays(A, B, C,A_gpu, B_gpu, C_gpu ); GPU_argv_init(); syr2kCuda(A_gpu, B_gpu, C_gpu); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_gpu); free(A); free(B); free(C); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); return 0; }
26428c91c885c000487d8ec372db7151df57ac8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; // Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < ncols; ++k) { float Melement = Md[row*ncols+k]; float Nelement = Nd[k*ncols+col]; Pvalue += Melement * Nelement; } Pd[row*ncols+col] = Pvalue; } int main(int argc, char *argv[]) { int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width],P[Width][Width]; float *Md, *Nd, *Pd; for(i=0;i<Width;i++) { for(j=0; j<Width;j++) { M[i][j] = 1; N[i][j]= 2; } } hipMalloc( (void**)&Md, size); hipMalloc( (void**)&Nd, size); hipMalloc( (void**)&Pd, size); hipMemcpy( Md, M, size, hipMemcpyHostToDevice); hipMemcpy( Nd, N, size, hipMemcpyHostToDevice); // Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH,Width/TILE_WIDTH); // Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width); // Read P from the device hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost); // Free device matrices hipFree(Md); hipFree(Nd); hipFree(Pd); for(i=0;i<Width;i++) { for (j=0;j<Width;j++) { printf("%.2f ",P[i][j]); } printf("\n"); } }
26428c91c885c000487d8ec372db7151df57ac8f.cu
#include<stdio.h> #define Width 32 // size of Width x Width matrix #define TILE_WIDTH 16 __global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; // Pvalue is used to store the element of the output matrix // that is computed by the thread float Pvalue = 0; for (int k = 0; k < ncols; ++k) { float Melement = Md[row*ncols+k]; float Nelement = Nd[k*ncols+col]; Pvalue += Melement * Nelement; } Pd[row*ncols+col] = Pvalue; } int main(int argc, char *argv[]) { int i,j; int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width],P[Width][Width]; float *Md, *Nd, *Pd; for(i=0;i<Width;i++) { for(j=0; j<Width;j++) { M[i][j] = 1; N[i][j]= 2; } } cudaMalloc( (void**)&Md, size); cudaMalloc( (void**)&Nd, size); cudaMalloc( (void**)&Pd, size); cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice); cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice); // Setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(Width/TILE_WIDTH,Width/TILE_WIDTH); // Launch the device computation threads! MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width); // Read P from the device cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost); // Free device matrices cudaFree(Md); cudaFree(Nd); cudaFree(Pd); for(i=0;i<Width;i++) { for (j=0;j<Width;j++) { printf("%.2f ",P[i][j]); } printf("\n"); } }
ab7659606724e05417e053359c64819065d14a93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layers/cross_entropy_loss.cu" #include "basics/tensor.cu" #include <vector> #include <assert.h> __global__ void initial_bottoms(Tensor<float>* bottoms_0, Tensor<float>* bottoms_1) { const size_t* dims = bottoms_0->GetDims(); printf("(%d, %d)\n", int(dims[0]), int(dims[3])); for (int i = 0; i < int(dims[0]); ++i) { for (int j = 0; j < int(dims[3]); ++j) { bottoms_0->at(i,0,0,j) = (float) (i + j + 1) / (int(dims[0]) + int(dims[3])+2); printf("(%d, %d): %f\n", i, j, bottoms_0->at(i,0,0,j)); } } bottoms_1->at(0,0,0,0) = 1; bottoms_1->at(1,0,0,0) = 2; } __global__ void show_tops(Tensor<float>* tops) { printf("Printing tops data\n"); printf("(%d, %d): %f\n", 0, 0, tops->at(0,0,0,0)); } __global__ void show_bottom_diff(Tensor<float>* bottom_diff) { printf("Printing bottom diff\n"); const size_t* dims = bottom_diff->GetDims(); for (int i = 0; i < int(dims[0]); ++i) { for (int j = 0; j < int(dims[3]); ++j) { printf("(%d, %d): %f\n", i, j, bottom_diff->at(i,0,0,j)); } } } void test_cross_entropy_loss_cpu() { printf("Begin test cross-entropy loss layer CPU\n"); int b = 2; int c = 3; Session* session = Session::GetNewSession(); session->gpu = false; session->batch_size = b; size_t dims[4] = {b, 1, 1, c}; std::vector<Tensor<float>*> bottoms; bottoms.push_back(Tensor<float>::CreateTensorCPU(dims)); printf("(%d, %d)\n", bottoms[0]->GetDims()[0], bottoms[0]->GetDims()[3]); for (size_t i = 0; i < dims[0]; ++i) { for (size_t j = 0; j < dims[3]; ++j) { bottoms[0]->at(i,0,0,j) = (float) (i + j + 1) / (dims[0] + dims[3]+2); printf("(%d, %d): %f\n", i, j, bottoms[0]->at(i,0,0,j)); } } size_t dims_l[4] = {b, 1, 1, 1}; bottoms.push_back(Tensor<float>::CreateTensorCPU(dims_l)); bottoms[1]->at(0,0,0,0) = 1; bottoms[1]->at(1,0,0,0) = 2; size_t dims_t[4] = {1, 1, 1, 1}; std::vector<Tensor<float>*> tops; tops.push_back(Tensor<float>::CreateTensorCPU(dims_t)); CrossEntropyLoss<float> 
cross_entropy_loss_layer; cross_entropy_loss_layer.Forward(bottoms, tops); printf("Printing tops data\n"); for (size_t i = 0; i < dims_t[0]; ++i) { for (size_t j = 0; j < dims_t[3]; ++j) { printf("(%d, %d): %f\n", i, j, tops[0]->at(i,0,0,j)); } } std::vector<Tensor<float>*> bottoms_diff; bottoms_diff.push_back(Tensor<float>::CreateTensorCPU(dims)); bottoms_diff.push_back(Tensor<float>::CreateTensorCPU(dims_l)); cross_entropy_loss_layer.Backward(tops, tops, bottoms, bottoms_diff); printf("Printing bottoms diff\n"); for (size_t i = 0; i < dims[0]; ++i) { for (size_t j = 0; j < dims[3]; ++j) { printf("(%d, %d): %f\n", i, j, bottoms_diff[0]->at(i,0,0,j)); } } delete bottoms[0], bottoms[1], tops[0], bottoms_diff[0], bottoms_diff[1]; } void test_cross_entropy_loss_gpu() { printf("Begin test cross entropy loss layer GPU\n"); int b = 2; int c = 3; Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = b; hipError_t cudaStatus = hipSetDevice(0); checkCudaErrors(cudaStatus); size_t dims[4] = {b, 1, 1, c}; std::vector<Tensor<float>*> bottoms; bottoms.push_back(Tensor<float>::CreateTensorGPU(dims)); size_t dims_l[4] = {b, 1, 1, 1}; bottoms.push_back(Tensor<float>::CreateTensorGPU(dims_l)); hipLaunchKernelGGL(( initial_bottoms), dim3(1),dim3(1), 0, 0, bottoms[0], bottoms[1]); size_t dims_t[4] = {1, 1, 1, 1}; std::vector<Tensor<float>*> tops; tops.push_back(Tensor<float>::CreateTensorGPU(dims_t)); CrossEntropyLoss<float> cross_entropy_loss_layer; cross_entropy_loss_layer.Forward(bottoms, tops); printf("Done GPU forward.\n"); hipLaunchKernelGGL(( show_tops), dim3(1),dim3(1), 0, 0, tops[0]); std::vector<Tensor<float>*> bottoms_diff; bottoms_diff.push_back(Tensor<float>::CreateTensorGPU(dims)); bottoms_diff.push_back(Tensor<float>::CreateTensorGPU(dims_l)); cross_entropy_loss_layer.Backward(tops, tops, bottoms, bottoms_diff); printf("Done GPU backward.\n"); hipLaunchKernelGGL(( show_bottom_diff), dim3(1),dim3(1), 0, 0, bottoms_diff[0]); 
hipFree(bottoms[0]); hipFree(bottoms[1]); hipFree(tops[0]); hipFree(bottoms_diff[0]); hipFree(bottoms_diff[1]); } int main() { test_cross_entropy_loss_cpu(); test_cross_entropy_loss_gpu(); }
ab7659606724e05417e053359c64819065d14a93.cu
#include "layers/cross_entropy_loss.cu" #include "basics/tensor.cu" #include <vector> #include <assert.h> __global__ void initial_bottoms(Tensor<float>* bottoms_0, Tensor<float>* bottoms_1) { const size_t* dims = bottoms_0->GetDims(); printf("(%d, %d)\n", int(dims[0]), int(dims[3])); for (int i = 0; i < int(dims[0]); ++i) { for (int j = 0; j < int(dims[3]); ++j) { bottoms_0->at(i,0,0,j) = (float) (i + j + 1) / (int(dims[0]) + int(dims[3])+2); printf("(%d, %d): %f\n", i, j, bottoms_0->at(i,0,0,j)); } } bottoms_1->at(0,0,0,0) = 1; bottoms_1->at(1,0,0,0) = 2; } __global__ void show_tops(Tensor<float>* tops) { printf("Printing tops data\n"); printf("(%d, %d): %f\n", 0, 0, tops->at(0,0,0,0)); } __global__ void show_bottom_diff(Tensor<float>* bottom_diff) { printf("Printing bottom diff\n"); const size_t* dims = bottom_diff->GetDims(); for (int i = 0; i < int(dims[0]); ++i) { for (int j = 0; j < int(dims[3]); ++j) { printf("(%d, %d): %f\n", i, j, bottom_diff->at(i,0,0,j)); } } } void test_cross_entropy_loss_cpu() { printf("Begin test cross-entropy loss layer CPU\n"); int b = 2; int c = 3; Session* session = Session::GetNewSession(); session->gpu = false; session->batch_size = b; size_t dims[4] = {b, 1, 1, c}; std::vector<Tensor<float>*> bottoms; bottoms.push_back(Tensor<float>::CreateTensorCPU(dims)); printf("(%d, %d)\n", bottoms[0]->GetDims()[0], bottoms[0]->GetDims()[3]); for (size_t i = 0; i < dims[0]; ++i) { for (size_t j = 0; j < dims[3]; ++j) { bottoms[0]->at(i,0,0,j) = (float) (i + j + 1) / (dims[0] + dims[3]+2); printf("(%d, %d): %f\n", i, j, bottoms[0]->at(i,0,0,j)); } } size_t dims_l[4] = {b, 1, 1, 1}; bottoms.push_back(Tensor<float>::CreateTensorCPU(dims_l)); bottoms[1]->at(0,0,0,0) = 1; bottoms[1]->at(1,0,0,0) = 2; size_t dims_t[4] = {1, 1, 1, 1}; std::vector<Tensor<float>*> tops; tops.push_back(Tensor<float>::CreateTensorCPU(dims_t)); CrossEntropyLoss<float> cross_entropy_loss_layer; cross_entropy_loss_layer.Forward(bottoms, tops); printf("Printing tops 
data\n"); for (size_t i = 0; i < dims_t[0]; ++i) { for (size_t j = 0; j < dims_t[3]; ++j) { printf("(%d, %d): %f\n", i, j, tops[0]->at(i,0,0,j)); } } std::vector<Tensor<float>*> bottoms_diff; bottoms_diff.push_back(Tensor<float>::CreateTensorCPU(dims)); bottoms_diff.push_back(Tensor<float>::CreateTensorCPU(dims_l)); cross_entropy_loss_layer.Backward(tops, tops, bottoms, bottoms_diff); printf("Printing bottoms diff\n"); for (size_t i = 0; i < dims[0]; ++i) { for (size_t j = 0; j < dims[3]; ++j) { printf("(%d, %d): %f\n", i, j, bottoms_diff[0]->at(i,0,0,j)); } } delete bottoms[0], bottoms[1], tops[0], bottoms_diff[0], bottoms_diff[1]; } void test_cross_entropy_loss_gpu() { printf("Begin test cross entropy loss layer GPU\n"); int b = 2; int c = 3; Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = b; cudaError_t cudaStatus = cudaSetDevice(0); checkCudaErrors(cudaStatus); size_t dims[4] = {b, 1, 1, c}; std::vector<Tensor<float>*> bottoms; bottoms.push_back(Tensor<float>::CreateTensorGPU(dims)); size_t dims_l[4] = {b, 1, 1, 1}; bottoms.push_back(Tensor<float>::CreateTensorGPU(dims_l)); initial_bottoms<<<1,1>>>(bottoms[0], bottoms[1]); size_t dims_t[4] = {1, 1, 1, 1}; std::vector<Tensor<float>*> tops; tops.push_back(Tensor<float>::CreateTensorGPU(dims_t)); CrossEntropyLoss<float> cross_entropy_loss_layer; cross_entropy_loss_layer.Forward(bottoms, tops); printf("Done GPU forward.\n"); show_tops<<<1,1>>>(tops[0]); std::vector<Tensor<float>*> bottoms_diff; bottoms_diff.push_back(Tensor<float>::CreateTensorGPU(dims)); bottoms_diff.push_back(Tensor<float>::CreateTensorGPU(dims_l)); cross_entropy_loss_layer.Backward(tops, tops, bottoms, bottoms_diff); printf("Done GPU backward.\n"); show_bottom_diff<<<1,1>>>(bottoms_diff[0]); cudaFree(bottoms[0]); cudaFree(bottoms[1]); cudaFree(tops[0]); cudaFree(bottoms_diff[0]); cudaFree(bottoms_diff[1]); } int main() { test_cross_entropy_loss_cpu(); test_cross_entropy_loss_gpu(); }
bcb66db70f34b7fe5e680eb5552aa6791a12ef00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: rows of output matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: subset of number of non-zeroes of input matrix. 
* * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is dense and the output matrix is dense. * * @params in dense input pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param inClen number of columns of input matrix * @param retRlen number of rows of output matrix * @param retClen number of columns of output matrix */ extern "C" /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" extern "C" __global__ void matrix_floor(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = floor(A[index]); } }
bcb66db70f34b7fe5e680eb5552aa6791a12ef00.cu
#include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: rows of output matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: subset of number of non-zeroes of input matrix. 
* * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is dense and the output matrix is dense. * * @params in dense input pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param inClen number of columns of input matrix * @param retRlen number of rows of output matrix * @param retClen number of columns of output matrix */ extern "C" /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" extern "C" __global__ void matrix_floor(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = floor(A[index]); } }
719885a19acbc7989ab0b610a758a56f3eb6232b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUDA_runtime.h" #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <math.h> typedef short WORD; typedef int DWORD; typedef int LONG; // sbox used in host __constant__ static unsigned char box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, // 0 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, // 1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, // 2 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, // 3 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, // 4 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, // 5 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, // 6 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, // 7 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, // 8 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, // 9 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, // a 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, // b 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, // c 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, // d 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, // e 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };// f // Round Keys 
__constant__ static unsigned char rcon[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; // sbox used in device __device__ static unsigned char s_box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, // 0 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, // 1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, // 2 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, // 3 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, // 4 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, // 5 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, // 6 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, // 7 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, // 8 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, // 9 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, // a 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, // b 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, // c 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, // d 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, // e 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };// f // inversed sbox used in device __device__ static unsigned char inv_s_box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, // 0 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, // 1 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, // 2 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, // 3 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, // 4 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, // 5 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, // 6 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, // 7 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, // 8 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, // 9 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, // a 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, // b 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, // c 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, // d 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, // e 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d };// f __device__ const int Nr = 10; // numbers of rounds __device__ const int Nk = 4; // numbers of columns in a key __device__ const int Nb = Nk * 4; // key size __device__ void shift_rows(unsigned char* state) { unsigned char i, k, s, col; for (i = 1; i < 4; i++) { s = 0; while (s < i) { col = state[Nk * i + 0]; for (k = 1; k < Nk; k++) { state[Nk * i + k - 1] = state[Nk * i + k]; } state[Nk * i + Nk - 1] = col; s++; } } } 
__device__ void inv_shift_rows(unsigned char* state) { unsigned char i, k, s, col; for (i = 1; i < 4; i++) { s = 0; while (s < i) { col = state[Nk * i + Nk - 1]; for (k = Nk - 1; k > 0; k--) { state[Nk * i + k] = state[Nk * i + k - 1]; } state[Nk * i + 0] = col; s++; } } } __device__ unsigned char gmult(unsigned char a, unsigned char b) { unsigned char p = 0, i = 0, hbs = 0; for (i = 0; i < 8; i++) { if (b & 1) { p ^= a; } hbs = a & 0x80; a <<= 1; if (hbs) a ^= 0x1b; b >>= 1; } return (unsigned char)p; } __device__ void coef_mult(unsigned char* a, unsigned char* b, unsigned char* d) { d[0] = gmult(a[0], b[0]) ^ gmult(a[3], b[1]) ^ gmult(a[2], b[2]) ^ gmult(a[1], b[3]); d[1] = gmult(a[1], b[0]) ^ gmult(a[0], b[1]) ^ gmult(a[3], b[2]) ^ gmult(a[2], b[3]); d[2] = gmult(a[2], b[0]) ^ gmult(a[1], b[1]) ^ gmult(a[0], b[2]) ^ gmult(a[3], b[3]); d[3] = gmult(a[3], b[0]) ^ gmult(a[2], b[1]) ^ gmult(a[1], b[2]) ^ gmult(a[0], b[3]); } __device__ void mix_columns(unsigned char* state) { unsigned char a[] = { 0x02, 0x01, 0x01, 0x03 }; unsigned char i, j, col[4], res[4]; for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { col[i] = state[Nk * i + j]; } coef_mult(a, col, res); for (i = 0; i < 4; i++) { state[Nk * i + j] = res[i]; } } } __device__ void inv_mix_columns(unsigned char* state) { unsigned char a[] = { 0x0e, 0x09, 0x0d, 0x0b }; unsigned char i, j, col[4], res[4]; for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { col[i] = state[Nk * i + j]; } coef_mult(a, col, res); for (i = 0; i < 4; i++) { state[Nk * i + j] = res[i]; } } } // expand original key so to use it in AddRoundKey stage - key_xor() void key_expansion(unsigned char* key, unsigned char* w) { unsigned char r, i, j, k, col[4]; // first round key is just the key for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { w[Nk * i + j] = key[Nk * i + j]; } } for (r = 1; r < Nr + 1; r++) { for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { if (j % Nk != 0) { col[i] = w[r * Nb + Nk * i + j - 1]; } else { col[i] = w[(r - 
1) * Nb + Nk * i + Nk - 1]; } } if (j % Nk == 0) { // rotate 4 bytes in word k = col[0]; col[0] = col[1]; col[1] = col[2]; col[2] = col[3]; col[3] = k; col[0] = box[col[0]]; col[1] = box[col[1]]; col[2] = box[col[2]]; col[3] = box[col[3]]; col[0] = col[0] ^ rcon[r - 1]; } w[r * Nb + Nk * 0 + j] = w[(r - 1) * Nb + Nk * 0 + j] ^ col[0]; w[r * Nb + Nk * 1 + j] = w[(r - 1) * Nb + Nk * 1 + j] ^ col[1]; w[r * Nb + Nk * 2 + j] = w[(r - 1) * Nb + Nk * 2 + j] ^ col[2]; w[r * Nb + Nk * 3 + j] = w[(r - 1) * Nb + Nk * 3 + j] ^ col[3]; } } } __device__ void key_xor(unsigned char* state, unsigned char* key) { unsigned char i; for (i = 0; i < Nb; i++) { state[i] = state[i] ^ key[i]; } } #pragma pack(push, 1) typedef struct tagBITMAPFILEHEADER { WORD bfType; // specifies the file type DWORD bfSize; // specifies the size in bytes of the bitmap file WORD bfReserved1; // reserved; must be 0 WORD bfReserved2; // reserved; must be 0 DWORD bOffBits; // species the offset in bytes from the bitmapfileheader to the bitmap bits }BITMAPFILEHEADER; #pragma pack(pop) #pragma pack(push, 1) typedef struct tagBITMAPINFOHEADER { DWORD biSize; // specifies the number of bytes required by the struct LONG biWidth; // specifies width in pixels LONG biHeight; // species height in pixels WORD biPlanes; // specifies the number of color planes, must be 1 WORD biBitCount; // specifies the number of bit per pixel DWORD biCompression;// spcifies the type of compression DWORD biSizeImage; // size of image in bytes LONG biXPelsPerMeter; // number of pixels per meter in x axis LONG biYPelsPerMeter; // number of pixels per meter in y axis DWORD biClrUsed; // number of colors used by th ebitmap DWORD biClrImportant; // number of colors that are important }BITMAPINFOHEADER; #pragma pack(pop) // load image from file unsigned char* LoadBitmapFile(char* filename, BITMAPINFOHEADER* bitmapInfoHeader, BITMAPFILEHEADER* bitmapFileHeader) { FILE* filePtr; // our file pointer unsigned char* bitmapImage; // store image data 
// open filename in read binary mode filePtr = fopen(filename, "rb"); if (filePtr == NULL) return NULL; // read the bitmap file header fread(bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr); // verify that this is a bmp file by check bitmap id if (bitmapFileHeader->bfType != 0x4D42) { fclose(filePtr); return NULL; } // read the bitmap info header fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr); // move file point to the begging of bitmap data fseek(filePtr, long(sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)), SEEK_SET); // allocate enough memory for the bitmap image data bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage); // verify memory allocation if (!bitmapImage) { free(bitmapImage); fclose(filePtr); return NULL; } // read in the bitmap image data fread(bitmapImage, 1, bitmapInfoHeader->biSizeImage, filePtr); // make sure bitmap image data was read if (bitmapImage == NULL) { fclose(filePtr); return NULL; } unsigned char* d_bitmapImage; // store image data in device // Allocate size to array in device memory hipMalloc((void**)&d_bitmapImage, bitmapInfoHeader->biSizeImage); // Copy data from host to device hipMemcpy(d_bitmapImage, bitmapImage, bitmapInfoHeader->biSizeImage, hipMemcpyHostToDevice); // Kernel call hipMemcpy(bitmapImage, d_bitmapImage, bitmapInfoHeader->biSizeImage, hipMemcpyDeviceToHost); // close file and return bitmap iamge data fclose(filePtr); return bitmapImage; } // Save image to file void SaveBitmapFile(char* filename, unsigned char* bitmapImage, BITMAPFILEHEADER* bitmapFileHeader, BITMAPINFOHEADER* bitmapInfoHeader) { FILE* filePtr; // our file pointer // open filename in write binary mode filePtr = fopen(filename, "wb"); if (filePtr == NULL) { printf("\nERROR: Cannot open file %s", filename); exit(1); } // write the bitmap file header fwrite(bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr); // write the bitmap info header fwrite(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr); // 
write in the bitmap image data fwrite(bitmapImage, bitmapInfoHeader->biSizeImage, 1, filePtr); // close file fclose(filePtr); } __global__ void encrypt(unsigned char* bitmapImage, unsigned char* expanded_key, int size, int threadN) { int threadId = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned char sdata[512 * Nb]; int i; unsigned int tid = threadIdx.x; for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) sdata[k] = bitmapImage[gid]; } __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[0]); __syncthreads(); for (int r = 1; r < Nr; r++) { // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = s_box[sdata[i]]; } __syncthreads(); // shift rows shift_rows(&sdata[tid * Nb]); __syncthreads(); // mix columns mix_columns(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[r * Nb]); __syncthreads(); } // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = s_box[sdata[i]]; } __syncthreads(); // shift rows shift_rows(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[Nr * Nb]); __syncthreads(); for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) bitmapImage[gid] = sdata[k]; } __syncthreads(); } __global__ void decrypt(unsigned char* bitmapImage, unsigned char* expanded_key, int size, int threadN) { int threadId = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned char sdata[512 * Nb]; int i; unsigned int tid = threadIdx.x; for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) sdata[k] = bitmapImage[gid]; } __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[Nr * Nb]); __syncthreads(); for (int r = 1; r < Nr; r++) { // shift rows inv_shift_rows(&sdata[tid * Nb]); __syncthreads(); // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = inv_s_box[sdata[i]]; 
} __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[(Nr - r) * Nb]); __syncthreads(); // mix columns inv_mix_columns(&sdata[tid * Nb]); __syncthreads(); } // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = inv_s_box[sdata[i]]; } __syncthreads(); // shift rows inv_shift_rows(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[0]); __syncthreads(); for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) bitmapImage[gid] = sdata[k]; } __syncthreads(); } int main() { BITMAPINFOHEADER bitmapInfoHeader; BITMAPFILEHEADER bitmapFileHeader; unsigned char* bitmapData; unsigned char* d_bitmapImage; //////////////////////////////////////////////////////////////////////////////////////////// Expand key unsigned char key[16] = { 0x2b, 0x28, 0xab, 0x09, 0x7e, 0xae, 0xf7, 0xcf, 0x15, 0xd2, 0x15, 0x4f, 0x16, 0xa6, 0x88, 0x3c }; // unsigned char key[] = "lqesutrlhajqzxck"; unsigned char expanded_key[(Nr + 1) * Nb]; key_expansion(key, expanded_key); unsigned char* d_expanded_key; hipMalloc((void**)&d_expanded_key, (Nr + 1) * Nb); hipMemcpy(d_expanded_key, expanded_key, (Nr + 1) * Nb, hipMemcpyHostToDevice); //////////////////////////////////////////////////////////////////////////////////////////// Encryption // Load image to CUDA memory bitmapData = LoadBitmapFile("lena.bmp", &bitmapInfoHeader, &bitmapFileHeader); hipMalloc((void**)&d_bitmapImage, bitmapInfoHeader.biSizeImage); hipMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, hipMemcpyHostToDevice); // Encryption kernel call int B = ceil(bitmapInfoHeader.biSizeImage / (512 * Nb)); int T = 512; int threadN = B * T; hipLaunchKernelGGL(( encrypt) , dim3(B), dim3(T), 0, 0, d_bitmapImage, d_expanded_key, bitmapInfoHeader.biSizeImage, threadN); // Save Encrypted image from CUDA memory to file hipMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, hipMemcpyDeviceToHost); 
SaveBitmapFile("Encrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader); //////////////////////////////////////////////////////////////////////////////////////////// Decryption // load encrypted image from file tp CUDA memory bitmapData = LoadBitmapFile("Encrypted.bmp", &bitmapInfoHeader, &bitmapFileHeader); hipMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, hipMemcpyHostToDevice); // Decryption kernel call hipLaunchKernelGGL(( decrypt) , dim3(B), dim3(T), 0, 0, d_bitmapImage, d_expanded_key, bitmapInfoHeader.biSizeImage, threadN); // Save Decrypted image from CUDA memory to file hipMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, hipMemcpyDeviceToHost); SaveBitmapFile("Decrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader); hipFree(d_bitmapImage); hipFree(d_expanded_key); return 0; }
719885a19acbc7989ab0b610a758a56f3eb6232b.cu
#include "CUDA_runtime.h" #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <math.h> typedef short WORD; typedef int DWORD; typedef int LONG; // sbox used in host __constant__ static unsigned char box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, // 0 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, // 1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, // 2 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, // 3 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, // 4 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, // 5 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, // 6 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, // 7 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, // 8 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, // 9 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, // a 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, // b 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, // c 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, // d 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, // e 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };// f // Round Keys __constant__ static unsigned char rcon[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 
0x40, 0x80, 0x1b, 0x36 }; // sbox used in device __device__ static unsigned char s_box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, // 0 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, // 1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, // 2 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, // 3 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, // 4 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, // 5 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, // 6 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, // 7 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, // 8 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, // 9 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, // a 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, // b 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, // c 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, // d 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, // e 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };// f // inversed sbox used in device __device__ static unsigned char inv_s_box[256] = { // 0 1 2 3 4 5 6 7 8 9 a b c d e f 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, // 0 0x7c, 0xe3, 0x39, 0x82, 0x9b, 
0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, // 1 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, // 2 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, // 3 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, // 4 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, // 5 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, // 6 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, // 7 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, // 8 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, // 9 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, // a 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, // b 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, // c 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, // d 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, // e 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d };// f __device__ const int Nr = 10; // numbers of rounds __device__ const int Nk = 4; // numbers of columns in a key __device__ const int Nb = Nk * 4; // key size __device__ void shift_rows(unsigned char* state) { unsigned char i, k, s, col; for (i = 1; i < 4; i++) { s = 0; while (s < i) { col = state[Nk * i + 0]; for (k = 1; k < Nk; k++) { state[Nk * i + k - 1] = state[Nk * i + k]; } state[Nk * i + Nk - 1] = col; s++; } } } __device__ void inv_shift_rows(unsigned char* state) { unsigned char i, k, s, col; for (i 
= 1; i < 4; i++) { s = 0; while (s < i) { col = state[Nk * i + Nk - 1]; for (k = Nk - 1; k > 0; k--) { state[Nk * i + k] = state[Nk * i + k - 1]; } state[Nk * i + 0] = col; s++; } } } __device__ unsigned char gmult(unsigned char a, unsigned char b) { unsigned char p = 0, i = 0, hbs = 0; for (i = 0; i < 8; i++) { if (b & 1) { p ^= a; } hbs = a & 0x80; a <<= 1; if (hbs) a ^= 0x1b; b >>= 1; } return (unsigned char)p; } __device__ void coef_mult(unsigned char* a, unsigned char* b, unsigned char* d) { d[0] = gmult(a[0], b[0]) ^ gmult(a[3], b[1]) ^ gmult(a[2], b[2]) ^ gmult(a[1], b[3]); d[1] = gmult(a[1], b[0]) ^ gmult(a[0], b[1]) ^ gmult(a[3], b[2]) ^ gmult(a[2], b[3]); d[2] = gmult(a[2], b[0]) ^ gmult(a[1], b[1]) ^ gmult(a[0], b[2]) ^ gmult(a[3], b[3]); d[3] = gmult(a[3], b[0]) ^ gmult(a[2], b[1]) ^ gmult(a[1], b[2]) ^ gmult(a[0], b[3]); } __device__ void mix_columns(unsigned char* state) { unsigned char a[] = { 0x02, 0x01, 0x01, 0x03 }; unsigned char i, j, col[4], res[4]; for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { col[i] = state[Nk * i + j]; } coef_mult(a, col, res); for (i = 0; i < 4; i++) { state[Nk * i + j] = res[i]; } } } __device__ void inv_mix_columns(unsigned char* state) { unsigned char a[] = { 0x0e, 0x09, 0x0d, 0x0b }; unsigned char i, j, col[4], res[4]; for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { col[i] = state[Nk * i + j]; } coef_mult(a, col, res); for (i = 0; i < 4; i++) { state[Nk * i + j] = res[i]; } } } // expand original key so to use it in AddRoundKey stage - key_xor() void key_expansion(unsigned char* key, unsigned char* w) { unsigned char r, i, j, k, col[4]; // first round key is just the key for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { w[Nk * i + j] = key[Nk * i + j]; } } for (r = 1; r < Nr + 1; r++) { for (j = 0; j < Nk; j++) { for (i = 0; i < 4; i++) { if (j % Nk != 0) { col[i] = w[r * Nb + Nk * i + j - 1]; } else { col[i] = w[(r - 1) * Nb + Nk * i + Nk - 1]; } } if (j % Nk == 0) { // rotate 4 bytes in word k = col[0]; 
col[0] = col[1]; col[1] = col[2]; col[2] = col[3]; col[3] = k; col[0] = box[col[0]]; col[1] = box[col[1]]; col[2] = box[col[2]]; col[3] = box[col[3]]; col[0] = col[0] ^ rcon[r - 1]; } w[r * Nb + Nk * 0 + j] = w[(r - 1) * Nb + Nk * 0 + j] ^ col[0]; w[r * Nb + Nk * 1 + j] = w[(r - 1) * Nb + Nk * 1 + j] ^ col[1]; w[r * Nb + Nk * 2 + j] = w[(r - 1) * Nb + Nk * 2 + j] ^ col[2]; w[r * Nb + Nk * 3 + j] = w[(r - 1) * Nb + Nk * 3 + j] ^ col[3]; } } } __device__ void key_xor(unsigned char* state, unsigned char* key) { unsigned char i; for (i = 0; i < Nb; i++) { state[i] = state[i] ^ key[i]; } } #pragma pack(push, 1) typedef struct tagBITMAPFILEHEADER { WORD bfType; // specifies the file type DWORD bfSize; // specifies the size in bytes of the bitmap file WORD bfReserved1; // reserved; must be 0 WORD bfReserved2; // reserved; must be 0 DWORD bOffBits; // species the offset in bytes from the bitmapfileheader to the bitmap bits }BITMAPFILEHEADER; #pragma pack(pop) #pragma pack(push, 1) typedef struct tagBITMAPINFOHEADER { DWORD biSize; // specifies the number of bytes required by the struct LONG biWidth; // specifies width in pixels LONG biHeight; // species height in pixels WORD biPlanes; // specifies the number of color planes, must be 1 WORD biBitCount; // specifies the number of bit per pixel DWORD biCompression;// spcifies the type of compression DWORD biSizeImage; // size of image in bytes LONG biXPelsPerMeter; // number of pixels per meter in x axis LONG biYPelsPerMeter; // number of pixels per meter in y axis DWORD biClrUsed; // number of colors used by th ebitmap DWORD biClrImportant; // number of colors that are important }BITMAPINFOHEADER; #pragma pack(pop) // load image from file unsigned char* LoadBitmapFile(char* filename, BITMAPINFOHEADER* bitmapInfoHeader, BITMAPFILEHEADER* bitmapFileHeader) { FILE* filePtr; // our file pointer unsigned char* bitmapImage; // store image data // open filename in read binary mode filePtr = fopen(filename, "rb"); if (filePtr == 
NULL) return NULL; // read the bitmap file header fread(bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr); // verify that this is a bmp file by check bitmap id if (bitmapFileHeader->bfType != 0x4D42) { fclose(filePtr); return NULL; } // read the bitmap info header fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr); // move file point to the begging of bitmap data fseek(filePtr, long(sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)), SEEK_SET); // allocate enough memory for the bitmap image data bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage); // verify memory allocation if (!bitmapImage) { free(bitmapImage); fclose(filePtr); return NULL; } // read in the bitmap image data fread(bitmapImage, 1, bitmapInfoHeader->biSizeImage, filePtr); // make sure bitmap image data was read if (bitmapImage == NULL) { fclose(filePtr); return NULL; } unsigned char* d_bitmapImage; // store image data in device // Allocate size to array in device memory cudaMalloc((void**)&d_bitmapImage, bitmapInfoHeader->biSizeImage); // Copy data from host to device cudaMemcpy(d_bitmapImage, bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyHostToDevice); // Kernel call cudaMemcpy(bitmapImage, d_bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyDeviceToHost); // close file and return bitmap iamge data fclose(filePtr); return bitmapImage; } // Save image to file void SaveBitmapFile(char* filename, unsigned char* bitmapImage, BITMAPFILEHEADER* bitmapFileHeader, BITMAPINFOHEADER* bitmapInfoHeader) { FILE* filePtr; // our file pointer // open filename in write binary mode filePtr = fopen(filename, "wb"); if (filePtr == NULL) { printf("\nERROR: Cannot open file %s", filename); exit(1); } // write the bitmap file header fwrite(bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr); // write the bitmap info header fwrite(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr); // write in the bitmap image data fwrite(bitmapImage, bitmapInfoHeader->biSizeImage, 
1, filePtr); // close file fclose(filePtr); } __global__ void encrypt(unsigned char* bitmapImage, unsigned char* expanded_key, int size, int threadN) { int threadId = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned char sdata[512 * Nb]; int i; unsigned int tid = threadIdx.x; for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) sdata[k] = bitmapImage[gid]; } __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[0]); __syncthreads(); for (int r = 1; r < Nr; r++) { // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = s_box[sdata[i]]; } __syncthreads(); // shift rows shift_rows(&sdata[tid * Nb]); __syncthreads(); // mix columns mix_columns(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[r * Nb]); __syncthreads(); } // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = s_box[sdata[i]]; } __syncthreads(); // shift rows shift_rows(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[Nr * Nb]); __syncthreads(); for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) bitmapImage[gid] = sdata[k]; } __syncthreads(); } __global__ void decrypt(unsigned char* bitmapImage, unsigned char* expanded_key, int size, int threadN) { int threadId = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned char sdata[512 * Nb]; int i; unsigned int tid = threadIdx.x; for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) sdata[k] = bitmapImage[gid]; } __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[Nr * Nb]); __syncthreads(); for (int r = 1; r < Nr; r++) { // shift rows inv_shift_rows(&sdata[tid * Nb]); __syncthreads(); // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = inv_s_box[sdata[i]]; } __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[(Nr - r) * 
Nb]); __syncthreads(); // mix columns inv_mix_columns(&sdata[tid * Nb]); __syncthreads(); } // substitution for (i = tid * Nb; i < (tid + 1) * Nb; i++) { sdata[i] = inv_s_box[sdata[i]]; } __syncthreads(); // shift rows inv_shift_rows(&sdata[tid * Nb]); __syncthreads(); // key_xor key_xor(&sdata[tid * Nb], &expanded_key[0]); __syncthreads(); for (int k = tid * Nb; k < (tid + 1) * Nb; k++) { int gid = k + blockIdx.x * 512 * Nb; if (gid < size) bitmapImage[gid] = sdata[k]; } __syncthreads(); } int main() { BITMAPINFOHEADER bitmapInfoHeader; BITMAPFILEHEADER bitmapFileHeader; unsigned char* bitmapData; unsigned char* d_bitmapImage; //////////////////////////////////////////////////////////////////////////////////////////// Expand key unsigned char key[16] = { 0x2b, 0x28, 0xab, 0x09, 0x7e, 0xae, 0xf7, 0xcf, 0x15, 0xd2, 0x15, 0x4f, 0x16, 0xa6, 0x88, 0x3c }; // unsigned char key[] = "lqesutrlhajqzxck"; unsigned char expanded_key[(Nr + 1) * Nb]; key_expansion(key, expanded_key); unsigned char* d_expanded_key; cudaMalloc((void**)&d_expanded_key, (Nr + 1) * Nb); cudaMemcpy(d_expanded_key, expanded_key, (Nr + 1) * Nb, cudaMemcpyHostToDevice); //////////////////////////////////////////////////////////////////////////////////////////// Encryption // Load image to CUDA memory bitmapData = LoadBitmapFile("lena.bmp", &bitmapInfoHeader, &bitmapFileHeader); cudaMalloc((void**)&d_bitmapImage, bitmapInfoHeader.biSizeImage); cudaMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, cudaMemcpyHostToDevice); // Encryption kernel call int B = ceil(bitmapInfoHeader.biSizeImage / (512 * Nb)); int T = 512; int threadN = B * T; encrypt <<<B, T>>> (d_bitmapImage, d_expanded_key, bitmapInfoHeader.biSizeImage, threadN); // Save Encrypted image from CUDA memory to file cudaMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, cudaMemcpyDeviceToHost); SaveBitmapFile("Encrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader); 
//////////////////////////////////////////////////////////////////////////////////////////// Decryption // load encrypted image from file tp CUDA memory bitmapData = LoadBitmapFile("Encrypted.bmp", &bitmapInfoHeader, &bitmapFileHeader); cudaMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, cudaMemcpyHostToDevice); // Decryption kernel call decrypt <<<B, T>>> (d_bitmapImage, d_expanded_key, bitmapInfoHeader.biSizeImage, threadN); // Save Decrypted image from CUDA memory to file cudaMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, cudaMemcpyDeviceToHost); SaveBitmapFile("Decrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader); cudaFree(d_bitmapImage); cudaFree(d_expanded_key); return 0; }
326052cd15aaa459b606f8ba78bc7b0cb9e76737.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <numeric> #include <vector> #include <algorithm> #include <stdio.h> #include <cufftMp.h> #include <mpi.h> #include <nvshmem.h> #include "../common/error_checks.hpp" #include "../common/generate_random.hpp" #include "../common/scaling.cuh" #include "../iterators/box_iterator.hpp" /** * This samples illustrates a basic use of cuFFTMp using the built-in, optimized, data distributions * in the case of an R2C - C2R transform * * It performs * - forward transform * - printing and scaling of the entries * - inverse transform */ void run_r2c_c2r(size_t nx, size_t ny, size_t nz, std::vector<float>& cpu_data, const int rank, const int size, MPI_Comm comm) { // Allocate GPU memory, copy CPU data to GPU // Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE hipComplex* gpu_data = (hipComplex*)nvshmem_malloc(cpu_data.size() * sizeof(float)); CUDA_CHECK(hipMemcpy(gpu_data, cpu_data.data(), cpu_data.size() * sizeof(float), hipMemcpyDefault)); // Initialize plans and stream hipfftHandle plan_r2c = 0; hipfftHandle plan_c2r = 0; hipStream_t stream = nullptr; CUDA_CHECK(hipStreamCreate(&stream)); CUFFT_CHECK(hipfftCreate(&plan_r2c)); CUFFT_CHECK(hipfftCreate(&plan_c2r)); // Attach the MPI communicator to the plans CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm)); CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm)); // Set the stream CUFFT_CHECK(hipfftSetStream(plan_r2c, stream)); CUFFT_CHECK(hipfftSetStream(plan_c2r, stream)); // Set default subformats CUFFT_CHECK(cufftXtSetSubformatDefault(plan_r2c, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED)); CUFFT_CHECK(cufftXtSetSubformatDefault(plan_c2r, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED)); // Make the plan size_t workspace; CUFFT_CHECK(hipfftMakePlan3d(plan_r2c, nx, ny, nz, HIPFFT_R2C, &workspace)); CUFFT_CHECK(hipfftMakePlan3d(plan_c2r, nx, ny, nz, HIPFFT_C2R, 
&workspace)); // Run R2C // cufftXtSetSubformatDefault(plan_r2c, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED) + HIPFFT_FORWARD // means gpu_data is distributed according to CUFFT_XT_FORMAT_INPLACE // Note: R2C transforms are implicitly forward CUFFT_CHECK(hipfftExecR2C(plan_r2c, (hipfftReal*)gpu_data, (hipfftComplex*)gpu_data)); // At this point, data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED auto [begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, HIPFFT_R2C, rank, size, nx, ny, nz, gpu_data); const size_t num_elements = std::distance(begin_d, end_d); const size_t num_threads = 128; const size_t num_blocks = (num_elements + num_threads - 1) / num_threads; hipLaunchKernelGGL(( scaling_kernel), dim3(num_blocks), dim3(num_threads), 0, stream, begin_d, end_d, rank, size, nx, ny, nz); // Run C2R // cufftXtSetSubformatDefault(plan_c2r, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED) + HIPFFT_BACKWARD // means gpu_data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED // Note: C2R transforms are implicitly inverse CUFFT_CHECK(hipfftExecC2R(plan_c2r, (hipfftComplex*)gpu_data, (hipfftReal*)gpu_data)); // Copy back to CPU and free // Data is again distributed according to CUFFT_XT_FORMAT_INPLACE CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipMemcpy(cpu_data.data(), gpu_data, cpu_data.size() * sizeof(float), hipMemcpyDefault)); CUFFT_CHECK(hipfftDestroy(plan_r2c)); CUFFT_CHECK(hipfftDestroy(plan_c2r)); CUDA_CHECK(hipStreamDestroy(stream)); nvshmem_free(gpu_data); }; int main(int argc, char** argv) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); int ndevices; CUDA_CHECK(hipGetDeviceCount(&ndevices)); CUDA_CHECK(hipSetDevice(rank % ndevices)); printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices); nvshmemx_init_attr_t attr; MPI_Comm comm = MPI_COMM_WORLD; attr.mpi_comm = (void*)&comm; 
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); // Logical transform size size_t nx = size; // any value >= size is OK size_t ny = size; // any value >= size is OK size_t nz = 2 * size; // need to be even and >= size // We start with Slabs distributed along X (X-Slabs) // Ranks 0 ... (nx % size - 1) own 1 more element in the X dimension // All ranks own all element in the Y and Z dimension // The Z dimension has to be padded to accomodate the (nz / 2 + 1) // complex numbers assuming an in-place data layout. int ranks_with_onemore = nx % size; size_t my_nx = (nx / size) + (rank < ranks_with_onemore ? 1 : 0); size_t padded_nz = 2 * (nz / 2 + 1); // Local, distributed, data std::vector<float> data(my_nx * ny * padded_nz, 1.0); generate_random(data, rank); std::vector<float> ref = data; // R2C + scaling + C2R run_r2c_c2r(nx, ny, nz, data, rank, size, MPI_COMM_WORLD); // Compute error double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, HIPFFT_R2C, rank, size, nx, ny, nz)); nvshmem_finalize(); MPI_Finalize(); return assess_error(error); }
326052cd15aaa459b606f8ba78bc7b0cb9e76737.cu
#include <numeric> #include <vector> #include <algorithm> #include <stdio.h> #include <cufftMp.h> #include <mpi.h> #include <nvshmem.h> #include "../common/error_checks.hpp" #include "../common/generate_random.hpp" #include "../common/scaling.cuh" #include "../iterators/box_iterator.hpp" /** * This samples illustrates a basic use of cuFFTMp using the built-in, optimized, data distributions * in the case of an R2C - C2R transform * * It performs * - forward transform * - printing and scaling of the entries * - inverse transform */ void run_r2c_c2r(size_t nx, size_t ny, size_t nz, std::vector<float>& cpu_data, const int rank, const int size, MPI_Comm comm) { // Allocate GPU memory, copy CPU data to GPU // Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE cuComplex* gpu_data = (cuComplex*)nvshmem_malloc(cpu_data.size() * sizeof(float)); CUDA_CHECK(cudaMemcpy(gpu_data, cpu_data.data(), cpu_data.size() * sizeof(float), cudaMemcpyDefault)); // Initialize plans and stream cufftHandle plan_r2c = 0; cufftHandle plan_c2r = 0; cudaStream_t stream = nullptr; CUDA_CHECK(cudaStreamCreate(&stream)); CUFFT_CHECK(cufftCreate(&plan_r2c)); CUFFT_CHECK(cufftCreate(&plan_c2r)); // Attach the MPI communicator to the plans CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm)); CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm)); // Set the stream CUFFT_CHECK(cufftSetStream(plan_r2c, stream)); CUFFT_CHECK(cufftSetStream(plan_c2r, stream)); // Set default subformats CUFFT_CHECK(cufftXtSetSubformatDefault(plan_r2c, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED)); CUFFT_CHECK(cufftXtSetSubformatDefault(plan_c2r, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED)); // Make the plan size_t workspace; CUFFT_CHECK(cufftMakePlan3d(plan_r2c, nx, ny, nz, CUFFT_R2C, &workspace)); CUFFT_CHECK(cufftMakePlan3d(plan_c2r, nx, ny, nz, CUFFT_C2R, &workspace)); // Run R2C // cufftXtSetSubformatDefault(plan_r2c, CUFFT_XT_FORMAT_INPLACE, 
CUFFT_XT_FORMAT_INPLACE_SHUFFLED) + CUFFT_FORWARD // means gpu_data is distributed according to CUFFT_XT_FORMAT_INPLACE // Note: R2C transforms are implicitly forward CUFFT_CHECK(cufftExecR2C(plan_r2c, (cufftReal*)gpu_data, (cufftComplex*)gpu_data)); // At this point, data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED auto [begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, CUFFT_R2C, rank, size, nx, ny, nz, gpu_data); const size_t num_elements = std::distance(begin_d, end_d); const size_t num_threads = 128; const size_t num_blocks = (num_elements + num_threads - 1) / num_threads; scaling_kernel<<<num_blocks, num_threads, 0, stream>>>(begin_d, end_d, rank, size, nx, ny, nz); // Run C2R // cufftXtSetSubformatDefault(plan_c2r, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED) + CUFFT_INVERSE // means gpu_data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED // Note: C2R transforms are implicitly inverse CUFFT_CHECK(cufftExecC2R(plan_c2r, (cufftComplex*)gpu_data, (cufftReal*)gpu_data)); // Copy back to CPU and free // Data is again distributed according to CUFFT_XT_FORMAT_INPLACE CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaMemcpy(cpu_data.data(), gpu_data, cpu_data.size() * sizeof(float), cudaMemcpyDefault)); CUFFT_CHECK(cufftDestroy(plan_r2c)); CUFFT_CHECK(cufftDestroy(plan_c2r)); CUDA_CHECK(cudaStreamDestroy(stream)); nvshmem_free(gpu_data); }; int main(int argc, char** argv) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); int ndevices; CUDA_CHECK(cudaGetDeviceCount(&ndevices)); CUDA_CHECK(cudaSetDevice(rank % ndevices)); printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices); nvshmemx_init_attr_t attr; MPI_Comm comm = MPI_COMM_WORLD; attr.mpi_comm = (void*)&comm; nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); // Logical transform size size_t nx = size; // any value >= size is OK size_t ny = size; // any 
value >= size is OK size_t nz = 2 * size; // need to be even and >= size // We start with Slabs distributed along X (X-Slabs) // Ranks 0 ... (nx % size - 1) own 1 more element in the X dimension // All ranks own all element in the Y and Z dimension // The Z dimension has to be padded to accomodate the (nz / 2 + 1) // complex numbers assuming an in-place data layout. int ranks_with_onemore = nx % size; size_t my_nx = (nx / size) + (rank < ranks_with_onemore ? 1 : 0); size_t padded_nz = 2 * (nz / 2 + 1); // Local, distributed, data std::vector<float> data(my_nx * ny * padded_nz, 1.0); generate_random(data, rank); std::vector<float> ref = data; // R2C + scaling + C2R run_r2c_c2r(nx, ny, nz, data, rank, size, MPI_COMM_WORLD); // Compute error double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, CUFFT_R2C, rank, size, nx, ny, nz)); nvshmem_finalize(); MPI_Finalize(); return assess_error(error); }
66c00cd276edbeac4c66672b841af266cbf3573a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MacroHoliday.h" #ifdef HOLIDAY_GPU #include "HolidayBlobGpu.h" #include "HolidayCommonCuda.h" #include "HolidayFeatureMap.h" HolidayBlobGpu::~HolidayBlobGpu() { } HolidayBlobGpu::HolidayBlobGpu() { memory_size = 0; data_size = 0; pbyData_cpu = 0; pfData_cpu = 0; pfData_gpu = 0; } template<class T> __global__ static void gMultiMatrixTrans_kernel(T *pDataIn, float *pfDataOut, int dwWidth, int dwHeight, int dwNum, int dwPitchIn, int dwPitchOut) { __shared__ float pfTmp[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM + 1]; //TRANS_BLOCK_DIM + 1 for bank conflict int dwIdxN = blockIdx.y / CUDA_BLOCK(dwHeight, TRANS_BLOCK_DIM); int dwBlockS = blockDim.x * blockIdx.x; int dwBlockRC = blockDim.y * (blockIdx.y % CUDA_BLOCK(dwHeight, TRANS_BLOCK_DIM)); int dwIdxX = dwBlockS + threadIdx.x; int dwIdxY = dwBlockRC + threadIdx.y; if (dwIdxX < dwWidth && dwIdxY < dwHeight) { pfTmp[threadIdx.y][threadIdx.x] = pDataIn[dwIdxN * dwHeight * dwPitchIn + dwIdxY * dwPitchIn + dwIdxX]; } __syncthreads(); dwIdxX = dwBlockS + threadIdx.y; dwIdxY = dwBlockRC + threadIdx.x; if (dwIdxX < dwWidth && dwIdxY < dwHeight) { pfDataOut[dwIdxN * dwWidth * dwPitchOut + dwIdxX * dwPitchOut + dwIdxY] = pfTmp[threadIdx.x][threadIdx.y]; } } __global__ static void gCharToFloat_kernel(unsigned char *pbyDataIn, float *pfDataOut, int dwSize) { int dwIdx = threadIdx.x + blockIdx.x * blockDim.x; if (dwIdx < dwSize) { pfDataOut[dwIdx] = pbyDataIn[dwIdx]; } } int HolidayBlobGpu::Gpu_init(void *pNetResourceGpu) { memory_size = 1; for (int i = 0; i < this->shape_.size(); i++) { memory_size *= shape_[i]; } HolidayNetResourceGpu *ptNetResourceGpu = (HolidayNetResourceGpu*)pNetResourceGpu; GlobleBufferMallocGpu(ptNetResourceGpu, memory_size* sizeof(float)); if (!pfData_gpu) { CUDA_ERROR(SafeCudaMalloc((void **)&pfData_gpu, memory_size * sizeof(float))); data_size = 0; } return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_DataIn(void 
*pNetResourceGpu, int dwStorageType,void *pDataIn) { int n_pitch; int n_slice; int n_height; int n_width; int n_num; if (shape_.size()==4) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height= shape_[2]; n_width = shape_[3]; n_num = shape_[0]; } else if (shape_.size()==2) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = 1; n_width = 1; n_num = shape_[0]; } else { } data_size = 1; for (int i = 0; i < this->shape_.size(); i++) { data_size *= shape_[i]; } HolidayNetResourceGpu *ptNetResourceGpu = (HolidayNetResourceGpu *)pNetResourceGpu; switch (dwStorageType) { case DATA_CPU_WIDTH: { CUDA_ERROR(hipMemcpyAsync(pfData_gpu, pDataIn, data_size * sizeof(float), hipMemcpyHostToDevice, ptNetResourceGpu->main_stream)); hipStreamSynchronize(ptNetResourceGpu->main_stream); break; } case DATA_CPU_WIDTH_CHAR: { unsigned char *pubyTmp = (unsigned char *)ptNetResourceGpu->pubyConvTmpBuffer; CUDA_ERROR(hipMemcpyAsync(pubyTmp, pbyData_cpu, data_size * sizeof(unsigned char), hipMemcpyHostToDevice, ptNetResourceGpu->main_stream)); gCharToFloat_kernel << <CUDA_BLOCK(data_size, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, ptNetResourceGpu->main_stream>> >(pubyTmp, pfData_gpu, data_size); break; } case DATA_CPU_SLICE_CHAR: { data_size = shape_[0] * shape_[1] * shape_[2] * shape_[3]; gTmpBuffer_gpu(ptNetResourceGpu, data_size * sizeof(unsigned char)); unsigned char *pubyTmp = (unsigned char*)ptNetResourceGpu->pubyConvTmpBuffer; CUDA_ERROR(hipMemcpyAsync(pubyTmp, pDataIn, data_size * sizeof(unsigned char), hipMemcpyHostToDevice, ptNetResourceGpu->main_stream)); dim3 blocksize(CUDA_BLOCK(shape_[1], TRANS_BLOCK_DIM), shape_[0] * CUDA_BLOCK(shape_[3] * shape_[2], TRANS_BLOCK_DIM)); dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); gMultiMatrixTrans_kernel<unsigned char> << <blocksize, threadsize, 0, ptNetResourceGpu->main_stream>> >(pubyTmp, (float *)pfData_gpu, shape_[1], shape_[3] * shape_[2], shape_[0], shape_[1], shape_[3] * shape_[2]); //unsigned char *pubyTmp = (unsigned char 
*)pNetResourceGpu->pubyConvTmpBuffer; //CUDA_ERROR(hipMemcpy(pubyTmp, pbyData_cpu, data_size* sizeof(unsigned char), hipMemcpyHostToDevice)); //dim3 blocksize(CUDA_BLOCK(n_slice, TRANS_BLOCK_DIM), n_num * CUDA_BLOCK(n_height * n_width, TRANS_BLOCK_DIM)); //dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); //gMultiMatrixTrans_kernel << <blocksize, threadsize, 0, pNetResourceGpu->main_stream>> >(pubyTmp, pfData_gpu, n_slice, n_height * n_width, n_num, n_pitch, n_height * n_width); break; } case DATA_GPU: break; case DATA_OPENCL: break; default: break; } return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_free() { if (pfData_gpu) hipFree(pfData_gpu); pfData_gpu = NULL; return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_DataOut(void *pNetResourceGpu_in, int dwStorageType, float *out) { int n_pitch; int n_slice; int n_height; int n_width; int n_num; if (shape_.size() == 4) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = shape_[2]; n_width = shape_[3]; n_num = shape_[0]; } else if (shape_.size() == 2) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = 1; n_width = 1; n_num = shape_[0]; } else { } HolidayNetResourceGpu *pNetResourceGpu = (HolidayNetResourceGpu *)pNetResourceGpu_in; switch (dwStorageType) { case DATA_CPU_WIDTH: { CUDA_ERROR(hipMemcpyAsync(out, pfData_gpu, n_slice * n_height * n_width * n_num * sizeof(float), hipMemcpyDeviceToHost, pNetResourceGpu->main_stream)); hipStreamSynchronize(pNetResourceGpu->main_stream); break; } case DATA_CPU_SLICE: { float *pfTmp = (float *)pNetResourceGpu->pubyConvTmpBuffer; dim3 blocksize(CUDA_BLOCK(n_height * n_width, TRANS_BLOCK_DIM), n_num * CUDA_BLOCK(n_slice, TRANS_BLOCK_DIM)); dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); gMultiMatrixTrans_kernel<float> << <blocksize, threadsize, 0, pNetResourceGpu->main_stream>> >(pfData_gpu, pfTmp, n_height * n_width, n_slice, n_num, n_height * n_width, n_pitch); CUDA_ERROR(hipMemcpyAsync(out, pfTmp, n_pitch * n_height * n_width * n_num * sizeof(float), 
hipMemcpyDeviceToHost, pNetResourceGpu->main_stream)); hipStreamSynchronize(pNetResourceGpu->main_stream); break; } case DATA_GPU: break; case DATA_OPENCL: break; default: break; } return CUDA_RETURN_VALUE; } #endif // HOLIDAY_GPU
66c00cd276edbeac4c66672b841af266cbf3573a.cu
#include "MacroHoliday.h" #ifdef HOLIDAY_GPU #include "HolidayBlobGpu.h" #include "HolidayCommonCuda.h" #include "HolidayFeatureMap.h" HolidayBlobGpu::~HolidayBlobGpu() { } HolidayBlobGpu::HolidayBlobGpu() { memory_size = 0; data_size = 0; pbyData_cpu = 0; pfData_cpu = 0; pfData_gpu = 0; } template<class T> __global__ static void gMultiMatrixTrans_kernel(T *pDataIn, float *pfDataOut, int dwWidth, int dwHeight, int dwNum, int dwPitchIn, int dwPitchOut) { __shared__ float pfTmp[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM + 1]; //TRANS_BLOCK_DIM + 1 for bank conflict int dwIdxN = blockIdx.y / CUDA_BLOCK(dwHeight, TRANS_BLOCK_DIM); int dwBlockS = blockDim.x * blockIdx.x; int dwBlockRC = blockDim.y * (blockIdx.y % CUDA_BLOCK(dwHeight, TRANS_BLOCK_DIM)); int dwIdxX = dwBlockS + threadIdx.x; int dwIdxY = dwBlockRC + threadIdx.y; if (dwIdxX < dwWidth && dwIdxY < dwHeight) { pfTmp[threadIdx.y][threadIdx.x] = pDataIn[dwIdxN * dwHeight * dwPitchIn + dwIdxY * dwPitchIn + dwIdxX]; } __syncthreads(); dwIdxX = dwBlockS + threadIdx.y; dwIdxY = dwBlockRC + threadIdx.x; if (dwIdxX < dwWidth && dwIdxY < dwHeight) { pfDataOut[dwIdxN * dwWidth * dwPitchOut + dwIdxX * dwPitchOut + dwIdxY] = pfTmp[threadIdx.x][threadIdx.y]; } } __global__ static void gCharToFloat_kernel(unsigned char *pbyDataIn, float *pfDataOut, int dwSize) { int dwIdx = threadIdx.x + blockIdx.x * blockDim.x; if (dwIdx < dwSize) { pfDataOut[dwIdx] = pbyDataIn[dwIdx]; } } int HolidayBlobGpu::Gpu_init(void *pNetResourceGpu) { memory_size = 1; for (int i = 0; i < this->shape_.size(); i++) { memory_size *= shape_[i]; } HolidayNetResourceGpu *ptNetResourceGpu = (HolidayNetResourceGpu*)pNetResourceGpu; GlobleBufferMallocGpu(ptNetResourceGpu, memory_size* sizeof(float)); if (!pfData_gpu) { CUDA_ERROR(SafeCudaMalloc((void **)&pfData_gpu, memory_size * sizeof(float))); data_size = 0; } return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_DataIn(void *pNetResourceGpu, int dwStorageType,void *pDataIn) { int n_pitch; int n_slice; int n_height; 
int n_width; int n_num; if (shape_.size()==4) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height= shape_[2]; n_width = shape_[3]; n_num = shape_[0]; } else if (shape_.size()==2) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = 1; n_width = 1; n_num = shape_[0]; } else { } data_size = 1; for (int i = 0; i < this->shape_.size(); i++) { data_size *= shape_[i]; } HolidayNetResourceGpu *ptNetResourceGpu = (HolidayNetResourceGpu *)pNetResourceGpu; switch (dwStorageType) { case DATA_CPU_WIDTH: { CUDA_ERROR(cudaMemcpyAsync(pfData_gpu, pDataIn, data_size * sizeof(float), cudaMemcpyHostToDevice, ptNetResourceGpu->main_stream)); cudaStreamSynchronize(ptNetResourceGpu->main_stream); break; } case DATA_CPU_WIDTH_CHAR: { unsigned char *pubyTmp = (unsigned char *)ptNetResourceGpu->pubyConvTmpBuffer; CUDA_ERROR(cudaMemcpyAsync(pubyTmp, pbyData_cpu, data_size * sizeof(unsigned char), cudaMemcpyHostToDevice, ptNetResourceGpu->main_stream)); gCharToFloat_kernel << <CUDA_BLOCK(data_size, CUDA_THREAD_NUM), CUDA_THREAD_NUM, 0, ptNetResourceGpu->main_stream>> >(pubyTmp, pfData_gpu, data_size); break; } case DATA_CPU_SLICE_CHAR: { data_size = shape_[0] * shape_[1] * shape_[2] * shape_[3]; gTmpBuffer_gpu(ptNetResourceGpu, data_size * sizeof(unsigned char)); unsigned char *pubyTmp = (unsigned char*)ptNetResourceGpu->pubyConvTmpBuffer; CUDA_ERROR(cudaMemcpyAsync(pubyTmp, pDataIn, data_size * sizeof(unsigned char), cudaMemcpyHostToDevice, ptNetResourceGpu->main_stream)); dim3 blocksize(CUDA_BLOCK(shape_[1], TRANS_BLOCK_DIM), shape_[0] * CUDA_BLOCK(shape_[3] * shape_[2], TRANS_BLOCK_DIM)); dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); gMultiMatrixTrans_kernel<unsigned char> << <blocksize, threadsize, 0, ptNetResourceGpu->main_stream>> >(pubyTmp, (float *)pfData_gpu, shape_[1], shape_[3] * shape_[2], shape_[0], shape_[1], shape_[3] * shape_[2]); //unsigned char *pubyTmp = (unsigned char *)pNetResourceGpu->pubyConvTmpBuffer; //CUDA_ERROR(cudaMemcpy(pubyTmp, pbyData_cpu, data_size* 
sizeof(unsigned char), cudaMemcpyHostToDevice)); //dim3 blocksize(CUDA_BLOCK(n_slice, TRANS_BLOCK_DIM), n_num * CUDA_BLOCK(n_height * n_width, TRANS_BLOCK_DIM)); //dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); //gMultiMatrixTrans_kernel << <blocksize, threadsize, 0, pNetResourceGpu->main_stream>> >(pubyTmp, pfData_gpu, n_slice, n_height * n_width, n_num, n_pitch, n_height * n_width); break; } case DATA_GPU: break; case DATA_OPENCL: break; default: break; } return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_free() { if (pfData_gpu) cudaFree(pfData_gpu); pfData_gpu = NULL; return CUDA_RETURN_VALUE; } int HolidayBlobGpu::Gpu_DataOut(void *pNetResourceGpu_in, int dwStorageType, float *out) { int n_pitch; int n_slice; int n_height; int n_width; int n_num; if (shape_.size() == 4) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = shape_[2]; n_width = shape_[3]; n_num = shape_[0]; } else if (shape_.size() == 2) { n_pitch = shape_[1]; n_slice = shape_[1]; n_height = 1; n_width = 1; n_num = shape_[0]; } else { } HolidayNetResourceGpu *pNetResourceGpu = (HolidayNetResourceGpu *)pNetResourceGpu_in; switch (dwStorageType) { case DATA_CPU_WIDTH: { CUDA_ERROR(cudaMemcpyAsync(out, pfData_gpu, n_slice * n_height * n_width * n_num * sizeof(float), cudaMemcpyDeviceToHost, pNetResourceGpu->main_stream)); cudaStreamSynchronize(pNetResourceGpu->main_stream); break; } case DATA_CPU_SLICE: { float *pfTmp = (float *)pNetResourceGpu->pubyConvTmpBuffer; dim3 blocksize(CUDA_BLOCK(n_height * n_width, TRANS_BLOCK_DIM), n_num * CUDA_BLOCK(n_slice, TRANS_BLOCK_DIM)); dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM); gMultiMatrixTrans_kernel<float> << <blocksize, threadsize, 0, pNetResourceGpu->main_stream>> >(pfData_gpu, pfTmp, n_height * n_width, n_slice, n_num, n_height * n_width, n_pitch); CUDA_ERROR(cudaMemcpyAsync(out, pfTmp, n_pitch * n_height * n_width * n_num * sizeof(float), cudaMemcpyDeviceToHost, pNetResourceGpu->main_stream)); 
cudaStreamSynchronize(pNetResourceGpu->main_stream); break; } case DATA_GPU: break; case DATA_OPENCL: break; default: break; } return CUDA_RETURN_VALUE; } #endif // HOLIDAY_GPU
dcf641427c3fd4905ac9c63d96b4f768bc9c30ce.hip
// !!! This is a file automatically generated by hipify!!! /* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include <assert.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include "../error/error.h" #include <hip/hip_runtime.h> __global__ static void spt_MatrixDotMulSeqKernel( sptIndex const mode, sptIndex const nmodes, sptIndex const rank, sptIndex const stride, sptValue ** dev_ata) { const sptIndex tidx = (sptIndex)threadIdx.x; const sptIndex tidy = (sptIndex)threadIdx.y; sptValue * ovals = dev_ata[nmodes]; ovals[tidx * stride + tidy] = 1; __syncthreads(); for(sptIndex m=1; m < nmodes; ++m) { sptIndex const pm = (mode + m) % nmodes; sptValue const * vals = dev_ata[pm]; ovals[tidx * stride + tidy] *= vals[tidx * stride + tidy]; } __syncthreads(); } int sptCudaMatrixDotMulSeq( sptIndex const mode, sptIndex const nmodes, sptIndex const rank, sptIndex const stride, sptValue ** dev_ata) { dim3 nthreads(rank, rank); // rank <= 16 dim3 nblocks(1, 1); hipLaunchKernelGGL(( spt_MatrixDotMulSeqKernel), dim3(nblocks), dim3(nthreads), 0, 0, mode, nmodes, rank, stride, dev_ata); int result = hipDeviceSynchronize(); spt_CheckCudaError(result != 0, "CUDA Matrix sptCudaMatrixDotMulSeq"); return 0; } __global__ static void spt_Matrix2NormKernel( sptIndex const nrows, sptIndex const ncols, sptIndex const stride, sptValue * const dev_vals, sptValue * 
const dev_lambda) { const sptIndex tidx = (sptIndex)threadIdx.x; const sptIndex tidy = (sptIndex)threadIdx.y; const sptIndex bidx = (sptIndex)blockIdx.x; const sptIndex i = bidx * blockDim.x + tidx; if(i < nrows) atomicAdd(&(dev_lambda[tidy]), dev_vals[i*stride + tidy] * dev_vals[i*stride + tidy]); __syncthreads(); dev_lambda[tidy] = sqrt(dev_lambda[tidy]); __syncthreads(); if(i < nrows) dev_vals[i*stride + tidy] /= dev_lambda[tidy]; __syncthreads(); } int sptCudaMatrix2Norm( sptIndex const nrows, sptIndex const ncols, sptIndex const stride, sptValue * const dev_vals, sptValue * const dev_lambda) { dim3 nthreads(16, ncols); // ncols <= 16 dim3 nblocks((nrows + 16 -1) / 16); hipLaunchKernelGGL(( spt_Matrix2NormKernel), dim3(nblocks), dim3(nthreads), 0, 0, nrows, ncols, stride, dev_vals, dev_lambda); int result = hipDeviceSynchronize(); spt_CheckCudaError(result != 0, "CUDA Matrix sptCudaMatrix2Norm"); return 0; }
dcf641427c3fd4905ac9c63d96b4f768bc9c30ce.cu
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include <assert.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include "../error/error.h" #include <cuda_runtime.h> __global__ static void spt_MatrixDotMulSeqKernel( sptIndex const mode, sptIndex const nmodes, sptIndex const rank, sptIndex const stride, sptValue ** dev_ata) { const sptIndex tidx = (sptIndex)threadIdx.x; const sptIndex tidy = (sptIndex)threadIdx.y; sptValue * ovals = dev_ata[nmodes]; ovals[tidx * stride + tidy] = 1; __syncthreads(); for(sptIndex m=1; m < nmodes; ++m) { sptIndex const pm = (mode + m) % nmodes; sptValue const * vals = dev_ata[pm]; ovals[tidx * stride + tidy] *= vals[tidx * stride + tidy]; } __syncthreads(); } int sptCudaMatrixDotMulSeq( sptIndex const mode, sptIndex const nmodes, sptIndex const rank, sptIndex const stride, sptValue ** dev_ata) { dim3 nthreads(rank, rank); // rank <= 16 dim3 nblocks(1, 1); spt_MatrixDotMulSeqKernel<<<nblocks, nthreads>>> (mode, nmodes, rank, stride, dev_ata); int result = cudaThreadSynchronize(); spt_CheckCudaError(result != 0, "CUDA Matrix sptCudaMatrixDotMulSeq"); return 0; } __global__ static void spt_Matrix2NormKernel( sptIndex const nrows, sptIndex const ncols, sptIndex const stride, sptValue * const dev_vals, sptValue * const dev_lambda) { const sptIndex tidx = (sptIndex)threadIdx.x; const sptIndex tidy = 
(sptIndex)threadIdx.y; const sptIndex bidx = (sptIndex)blockIdx.x; const sptIndex i = bidx * blockDim.x + tidx; if(i < nrows) atomicAdd(&(dev_lambda[tidy]), dev_vals[i*stride + tidy] * dev_vals[i*stride + tidy]); __syncthreads(); dev_lambda[tidy] = sqrt(dev_lambda[tidy]); __syncthreads(); if(i < nrows) dev_vals[i*stride + tidy] /= dev_lambda[tidy]; __syncthreads(); } int sptCudaMatrix2Norm( sptIndex const nrows, sptIndex const ncols, sptIndex const stride, sptValue * const dev_vals, sptValue * const dev_lambda) { dim3 nthreads(16, ncols); // ncols <= 16 dim3 nblocks((nrows + 16 -1) / 16); spt_Matrix2NormKernel<<<nblocks, nthreads>>>(nrows, ncols, stride, dev_vals, dev_lambda); int result = cudaThreadSynchronize(); spt_CheckCudaError(result != 0, "CUDA Matrix sptCudaMatrix2Norm"); return 0; }
75cc7d2ca47a11ee58b5b51895f5d502f68be9d7.hip
// !!! This is a file automatically generated by hipify!!! #include "config/config.hpp" #if __HIPCC__ >= 200 # define TGB_MAX_CUDA_CONSTANT_SIZE 65536 #else # define TGB_MAX_CUDA_CONSTANT_SIZE 65536 #endif namespace test_gaussian_blur { // constant memory use to cache kernel data // define in *.cu file to avoid duplicate definition __constant__ unsigned char c_const[TGB_MAX_CUDA_CONSTANT_SIZE]; }
75cc7d2ca47a11ee58b5b51895f5d502f68be9d7.cu
#include "config/config.hpp" #if __CUDACC__ >= 200 # define TGB_MAX_CUDA_CONSTANT_SIZE 65536 #else # define TGB_MAX_CUDA_CONSTANT_SIZE 65536 #endif namespace test_gaussian_blur { // constant memory use to cache kernel data // define in *.cu file to avoid duplicate definition __constant__ unsigned char c_const[TGB_MAX_CUDA_CONSTANT_SIZE]; }
2094aa938e383eea92ea70b573d5aefc13fd3eb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/integral_image_op.h" namespace caffe2 { namespace { __global__ void RowPassKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which row, channel, and batch element we're processing int row = i % rows_out; int chan = (i / rows_out) % chans; int ind = i / rows_out / chans; // Input is (H, W) and output is (H + 1, W + 1) int rows_in = rows_out - 1; int cols_in = cols_out - 1; // Row pointer to input data // Input data is shift (-1, -1) relative to output data, hence row - 1 const float* row_in_data = in + cols_in * ((row - 1) + rows_in * (chan + ind * chans)); // Row pointer to output data float* row_out_data = out + cols_out * (row + rows_out * (chan + ind * chans)); // The first row and first column of the output is all zeros row_out_data[0] = 0.; if (row == 0) { for (int i = 1; i < cols_out; ++i) { row_out_data[i] = 0.; } } else { for (int i = 1; i < cols_out; ++i) { // Recall that input data is shift (-1, -1) relative to the output, // hence i - 1 row_out_data[i] = row_out_data[i - 1] + row_in_data[i - 1]; } } } } __global__ void RowPassGradientKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which row, channel, and batch element we're processing int row = i % rows_out; int chan = (i / rows_out) % chans; int ind = i / rows_out / chans; // Input in (H + 1, W + 1) and output is (H + 1, W) int rows_in = rows_out; int cols_in = cols_out + 1; // Col pointer to input data const float* row_in_data = in + cols_in * (row + rows_in * (chan + ind * chans)); // Col pointer to output data float* row_out_data = out + cols_out * (row + rows_out * (chan + ind * chans)); row_out_data[0] = row_in_data[0]; for (int i = 1; i < cols_out; ++i) { row_out_data[i] = 
row_out_data[i - 1] + row_in_data[i]; } } } __global__ void ColPassKernel(int count, int rows_out, int cols_out, int chans, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which col, channel, and batch element we're processing int col = i % cols_out; int chan = (i / cols_out) % chans; int ind = i / cols_out / chans; float* col_out_data = out + col + cols_out * rows_out * (chan + ind * chans); for (int i = 1; i < rows_out; ++i) { col_out_data[i * cols_out] += col_out_data[(i - 1) * cols_out]; } } } __global__ void ColPassGradientKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which col, channel, and batch element we're processing int col = i % cols_out; int chan = (i / cols_out) % chans; int ind = i / cols_out / chans; // Input is (H + 1, W) and output is (H, W) int rows_in = rows_out + 1; int cols_in = cols_out; // Col pointer to input data const float* col_in_data = in + col + cols_in * rows_in * (chan + ind * chans); // Col pointer to output data float* col_out_data = out + col + cols_out * rows_out * (chan + ind * chans); col_out_data[0] = col_in_data[0]; for (int i = 1; i < rows_out; ++i) { col_out_data[i * cols_out] = col_out_data[(i - 1) * cols_out] + col_in_data[i * cols_in]; } } } } // namespace template <> bool IntegralImageOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Only supports 4D tensors for the momement"); // Input is (N, C, H, W) // Output is (N, C, H + 1, W + 1) vector<TIndex> out_shape(X.dims()); out_shape[2] += 1; // H + 1 output size out_shape[3] += 1; // W + 1 output size Y->Resize(out_shape); const int chans = X.dim32(1); const int rows_out = Y->dim32(2); const int cols_out = Y->dim32(3); // Integral image over rows of input X const int row_pass_size = X.dim32(0) * chans * rows_out; hipLaunchKernelGGL(( RowPassKernel), dim3(CAFFE_GET_BLOCKS(row_pass_size)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), row_pass_size, rows_out, cols_out, chans, X.data<float>(), Y->template mutable_data<float>()); // Integral image over columns of the integral image over rows const int col_pass_size = X.dim32(0) * chans * cols_out; hipLaunchKernelGGL(( ColPassKernel), dim3(CAFFE_GET_BLOCKS(col_pass_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), col_pass_size, rows_out, cols_out, chans, Y->template mutable_data<float>()); return true; } template <> bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Original input to "forward" op auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to // "forward" op (aka "gradInput") dX->ResizeLike(X); // Row pass reduces shape of dY from (N, C, H + 1, W + 1) // to (N, C, H + 1, W) // Col pass reduces shape to (N, C, H, W) vector<TIndex> row_pass_shape(dY.dims()); row_pass_shape[3] -= 1; row_pass_buffer_.Resize(row_pass_shape); const int chans = row_pass_buffer_.dim32(1); const int rows_out = row_pass_buffer_.dim32(2); const int cols_out = row_pass_buffer_.dim32(3); // Integral image over rows of input X const int row_pass_size = X.dim32(0) * chans * rows_out; hipLaunchKernelGGL(( RowPassGradientKernel), dim3(CAFFE_GET_BLOCKS(row_pass_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), row_pass_size, rows_out, cols_out, chans, dY.data<float>(), row_pass_buffer_.mutable_data<float>()); // Integral image over columns of the integral image over rows const int col_pass_size = X.dim32(0) * chans * cols_out; hipLaunchKernelGGL(( ColPassGradientKernel), dim3(CAFFE_GET_BLOCKS(col_pass_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), col_pass_size, rows_out - 1, cols_out, chans, row_pass_buffer_.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, 
CUDAContext>); REGISTER_CUDA_OPERATOR( IntegralImageGradient, IntegralImageGradientOp<float, CUDAContext>); } // namespace caffe2
2094aa938e383eea92ea70b573d5aefc13fd3eb3.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/integral_image_op.h" namespace caffe2 { namespace { __global__ void RowPassKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which row, channel, and batch element we're processing int row = i % rows_out; int chan = (i / rows_out) % chans; int ind = i / rows_out / chans; // Input is (H, W) and output is (H + 1, W + 1) int rows_in = rows_out - 1; int cols_in = cols_out - 1; // Row pointer to input data // Input data is shift (-1, -1) relative to output data, hence row - 1 const float* row_in_data = in + cols_in * ((row - 1) + rows_in * (chan + ind * chans)); // Row pointer to output data float* row_out_data = out + cols_out * (row + rows_out * (chan + ind * chans)); // The first row and first column of the output is all zeros row_out_data[0] = 0.; if (row == 0) { for (int i = 1; i < cols_out; ++i) { row_out_data[i] = 0.; } } else { for (int i = 1; i < cols_out; ++i) { // Recall that input data is shift (-1, -1) relative to the output, // hence i - 1 row_out_data[i] = row_out_data[i - 1] + row_in_data[i - 1]; } } } } __global__ void RowPassGradientKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which row, channel, and batch element we're processing int row = i % rows_out; int chan = (i / rows_out) % chans; int ind = i / rows_out / chans; // Input in (H + 1, W + 1) and output is (H + 1, W) int rows_in = rows_out; int cols_in = cols_out + 1; // Col pointer to input data const float* row_in_data = in + cols_in * (row + rows_in * (chan + ind * chans)); // Col pointer to output data float* row_out_data = out + cols_out * (row + rows_out * (chan + ind * chans)); row_out_data[0] = row_in_data[0]; for (int i = 1; i < cols_out; ++i) { row_out_data[i] = row_out_data[i - 1] + row_in_data[i]; } } } __global__ void ColPassKernel(int count, int 
rows_out, int cols_out, int chans, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which col, channel, and batch element we're processing int col = i % cols_out; int chan = (i / cols_out) % chans; int ind = i / cols_out / chans; float* col_out_data = out + col + cols_out * rows_out * (chan + ind * chans); for (int i = 1; i < rows_out; ++i) { col_out_data[i * cols_out] += col_out_data[(i - 1) * cols_out]; } } } __global__ void ColPassGradientKernel( int count, int rows_out, int cols_out, int chans, const float* in, float* out) { CUDA_1D_KERNEL_LOOP(i, count) { // Figure out which col, channel, and batch element we're processing int col = i % cols_out; int chan = (i / cols_out) % chans; int ind = i / cols_out / chans; // Input is (H + 1, W) and output is (H, W) int rows_in = rows_out + 1; int cols_in = cols_out; // Col pointer to input data const float* col_in_data = in + col + cols_in * rows_in * (chan + ind * chans); // Col pointer to output data float* col_out_data = out + col + cols_out * rows_out * (chan + ind * chans); col_out_data[0] = col_in_data[0]; for (int i = 1; i < rows_out; ++i) { col_out_data[i * cols_out] = col_out_data[(i - 1) * cols_out] + col_in_data[i * cols_in]; } } } } // namespace template <> bool IntegralImageOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Only supports 4D tensors for the momement"); // Input is (N, C, H, W) // Output is (N, C, H + 1, W + 1) vector<TIndex> out_shape(X.dims()); out_shape[2] += 1; // H + 1 output size out_shape[3] += 1; // W + 1 output size Y->Resize(out_shape); const int chans = X.dim32(1); const int rows_out = Y->dim32(2); const int cols_out = Y->dim32(3); // Integral image over rows of input X const int row_pass_size = X.dim32(0) * chans * rows_out; RowPassKernel<<< CAFFE_GET_BLOCKS(row_pass_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( row_pass_size, rows_out, cols_out, chans, X.data<float>(), Y->template 
mutable_data<float>()); // Integral image over columns of the integral image over rows const int col_pass_size = X.dim32(0) * chans * cols_out; ColPassKernel<<< CAFFE_GET_BLOCKS(col_pass_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( col_pass_size, rows_out, cols_out, chans, Y->template mutable_data<float>()); return true; } template <> bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Original input to "forward" op auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to // "forward" op (aka "gradInput") dX->ResizeLike(X); // Row pass reduces shape of dY from (N, C, H + 1, W + 1) // to (N, C, H + 1, W) // Col pass reduces shape to (N, C, H, W) vector<TIndex> row_pass_shape(dY.dims()); row_pass_shape[3] -= 1; row_pass_buffer_.Resize(row_pass_shape); const int chans = row_pass_buffer_.dim32(1); const int rows_out = row_pass_buffer_.dim32(2); const int cols_out = row_pass_buffer_.dim32(3); // Integral image over rows of input X const int row_pass_size = X.dim32(0) * chans * rows_out; RowPassGradientKernel<<< CAFFE_GET_BLOCKS(row_pass_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( row_pass_size, rows_out, cols_out, chans, dY.data<float>(), row_pass_buffer_.mutable_data<float>()); // Integral image over columns of the integral image over rows const int col_pass_size = X.dim32(0) * chans * cols_out; ColPassGradientKernel<<< CAFFE_GET_BLOCKS(col_pass_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( col_pass_size, rows_out - 1, cols_out, chans, row_pass_buffer_.data<float>(), dX->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( IntegralImageGradient, IntegralImageGradientOp<float, CUDAContext>); } // namespace caffe2
252c462a6ff59ec655df37cd2091965dd5ac16c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <iomanip> #include <assert.h> #include "rocblas.h" #define prec_save 10 using namespace std; #include "cublasWrappers.cuh" #define BLOCKSIZEMEMSET 256 #define BLOCKSIZEMULTIPLY 256 /*******************/ /* iDivUp FUNCTION */ /*******************/ int iDivUp(int a, int d_f) { return ((a % d_f) != 0) ? (a / d_f + 1) : (a / d_f); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /*************************/ /* CUBLAS ERROR CHECKING */ /*************************/ static const char *_cublasGetErrorEnum(hipblasStatus_t error) { switch (error) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } inline void __cublasSafeCall(hipblasStatus_t err, const char *file, const int line) { if (HIPBLAS_STATUS_SUCCESS 
!= err) { fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, __LINE__, \ _cublasGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cublasSafeCall(hipblasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); } /*****************/ /* DEVICE MEMSET */ /*****************/ template<class T> __global__ void deviceMemsetKernel(T * const devPtr, T const value, size_t const N) { size_t const tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid >= N) return; devPtr[tid] = value; } template<class T> void deviceMemset(T * const devPtr, T const value, size_t const N) { deviceMemsetKernel<T> << <iDivUp(N, BLOCKSIZEMEMSET), BLOCKSIZEMEMSET >> >(devPtr, value, N); } /****************************/ /* Ax DEVICE MULTIPLICATION */ /****************************/ template<class T> __global__ void AxKernel(int const M, T const * const d_x, T * const d_b) { int M2 = M * M; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= M2) return; T value = (T(4)) * d_x[tid]; if ( tid % M != 0) value -= d_x[tid - 1]; if ((tid + 1) % M != 0) value -= d_x[tid + 1]; if ( tid + M < M2) value -= d_x[tid + M]; if (tid - M >= 0) value -= d_x[tid - M]; d_b[tid] = value; } /*****************************/ /* CONJUGATE GRADIENT SOLVER */ /*****************************/ template<class T> int conjugateGradientPoisson(hipblasHandle_t const cublasHandle, int const M, T const * const d_b, T * const d_x, int maxIter, T tol) { T *d_p; gpuErrchk(hipMalloc(&d_p, M * M * sizeof(T))); T *d_r; gpuErrchk(hipMalloc(&d_r, M * M * sizeof(T))); T *d_h; gpuErrchk(hipMalloc(&d_h, M * M * sizeof(T))); T *d_Ax; gpuErrchk(hipMalloc(&d_Ax, M * M * sizeof(T))); T *d_q; gpuErrchk(hipMalloc(&d_q, M * M * sizeof(T))); T beta, c, ph; T const T_ONE(1); T const T_MINUS_ONE(-1); // --- Solution initialization d_x = 0 deviceMemset<T>(d_x, T(0), M * M); // --- d_Ax = A * d_x AxKernel<T> << <iDivUp(M * M, 1024), 1024 >> >(M, d_x, d_Ax); // --- d_r = d_b 
cublasSafeCall(cublasTcopy(cublasHandle, M * M, d_b, 1, d_r, 1)); // --- d_r = d_r - d_Ax = d_b - d_Ax cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &T_MINUS_ONE, d_Ax, 1, d_r, 1)); // --- norm0 = ||d_r|| T norm0; cublasSafeCall(cublasTnrm2(cublasHandle, M * M, d_r, 1, &norm0)); // --- d_p = d_r cublasSafeCall(cublasTcopy(cublasHandle, M * M, d_r, 1, d_p, 1)); int numIter; for (numIter = 1; numIter <= maxIter; ++numIter) { // --- beta = <d_r, d_r> cublasSafeCall(cublasTdot(cublasHandle, M * M, d_r, 1, d_r, 1, &beta)); // --- d_h = Ap AxKernel<T> << <iDivUp(M * M, BLOCKSIZEMULTIPLY), BLOCKSIZEMULTIPLY >> >(M, d_p, d_h); // --- ph = <d_p, d_h> cublasSafeCall(cublasTdot(cublasHandle, M * M, d_p, 1, d_h, 1, &ph)); c = beta / ph; // --- d_x = d_x + c * d_p cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &c, d_p, 1, d_x, 1)); // --- d_r = d_r - c * d_h T minus_c = -c; cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &minus_c, d_h, 1, d_r, 1)); T norm; cublasSafeCall(cublasTnrm2(cublasHandle, M * M, d_r, 1, &norm)); if (norm <= tol * norm0) break; // --- rr = <d_r, d_r> T rr; cublasSafeCall(cublasTdot(cublasHandle, M * M, d_r, 1, d_r, 1, &rr)); beta = rr / beta; // --- d_p = beta * d_p cublasSafeCall(cublasTscal(cublasHandle, M * M, &beta, d_p, 1)); cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &T_ONE, d_r, 1, d_p, 1)); } gpuErrchk(hipFree(d_p)); gpuErrchk(hipFree(d_r)); gpuErrchk(hipFree(d_h)); gpuErrchk(hipFree(d_Ax)); gpuErrchk(hipFree(d_q)); return numIter; } /*************************************/ /* SAVE FLOAT ARRAY FROM GPU TO FILE */ /*************************************/ template <class T> void saveGPUrealtxt(const T * d_in, const char *filename, const int M) { T *h_in = (T *)malloc(M * sizeof(T)); gpuErrchk(hipMemcpy(h_in, d_in, M * sizeof(T), hipMemcpyDeviceToHost)); std::ofstream outfile; outfile.open(filename); for (int i = 0; i < M; i++) outfile << std::setprecision(prec_save) << h_in[i] << "\n"; outfile.close(); } /********/ /* MAIN */ /********/ int 
main() { hipblasHandle_t cublasHandle; hipblasCreate(&cublasHandle); const int M = 128; // --- Number of discretization points along d_u and y const int maxIter = 10000; // --- Maximum number of iterations const double tol = 0.0000001; // --- Conjugate gradient convergence tol // --- Equation right-hand side double *d_f; gpuErrchk(hipMalloc(&d_f, (M * M) * sizeof(double))); deviceMemset<double>(d_f, (double)1.0, M * M); // --- Equation unknown double *d_u; gpuErrchk(hipMalloc(&d_u, (M *M) * sizeof(double))); int numIter = conjugateGradientPoisson<double>(cublasHandle, M, d_f, d_u, maxIter, tol); cout << "Number of performed iterations performed " << numIter << endl; saveGPUrealtxt(d_u, ".\\d_result_x.txt", M * M); saveGPUrealtxt(d_f, ".\\d_result_b.txt", M * M); }
252c462a6ff59ec655df37cd2091965dd5ac16c3.cu
#include <iostream> #include <fstream> #include <iomanip> #include <assert.h> #include "cublas.h" #define prec_save 10 using namespace std; #include "cublasWrappers.cuh" #define BLOCKSIZEMEMSET 256 #define BLOCKSIZEMULTIPLY 256 /*******************/ /* iDivUp FUNCTION */ /*******************/ int iDivUp(int a, int d_f) { return ((a % d_f) != 0) ? (a / d_f + 1) : (a / d_f); } /********************/ /* CUDA ERROR CHECK */ /********************/ // --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); } /*************************/ /* CUBLAS ERROR CHECKING */ /*************************/ static const char *_cublasGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line) { if (CUBLAS_STATUS_SUCCESS != err) { fprintf(stderr, "CUBLAS error in file '%s', line %d, error: %s\nterminating!\n", __FILE__, 
__LINE__, \ _cublasGetErrorEnum(err)); \ assert(0); \ } } extern "C" void cublasSafeCall(cublasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); } /*****************/ /* DEVICE MEMSET */ /*****************/ template<class T> __global__ void deviceMemsetKernel(T * const devPtr, T const value, size_t const N) { size_t const tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid >= N) return; devPtr[tid] = value; } template<class T> void deviceMemset(T * const devPtr, T const value, size_t const N) { deviceMemsetKernel<T> << <iDivUp(N, BLOCKSIZEMEMSET), BLOCKSIZEMEMSET >> >(devPtr, value, N); } /****************************/ /* Ax DEVICE MULTIPLICATION */ /****************************/ template<class T> __global__ void AxKernel(int const M, T const * const d_x, T * const d_b) { int M2 = M * M; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= M2) return; T value = (T(4)) * d_x[tid]; if ( tid % M != 0) value -= d_x[tid - 1]; if ((tid + 1) % M != 0) value -= d_x[tid + 1]; if ( tid + M < M2) value -= d_x[tid + M]; if (tid - M >= 0) value -= d_x[tid - M]; d_b[tid] = value; } /*****************************/ /* CONJUGATE GRADIENT SOLVER */ /*****************************/ template<class T> int conjugateGradientPoisson(cublasHandle_t const cublasHandle, int const M, T const * const d_b, T * const d_x, int maxIter, T tol) { T *d_p; gpuErrchk(cudaMalloc(&d_p, M * M * sizeof(T))); T *d_r; gpuErrchk(cudaMalloc(&d_r, M * M * sizeof(T))); T *d_h; gpuErrchk(cudaMalloc(&d_h, M * M * sizeof(T))); T *d_Ax; gpuErrchk(cudaMalloc(&d_Ax, M * M * sizeof(T))); T *d_q; gpuErrchk(cudaMalloc(&d_q, M * M * sizeof(T))); T beta, c, ph; T const T_ONE(1); T const T_MINUS_ONE(-1); // --- Solution initialization d_x = 0 deviceMemset<T>(d_x, T(0), M * M); // --- d_Ax = A * d_x AxKernel<T> << <iDivUp(M * M, 1024), 1024 >> >(M, d_x, d_Ax); // --- d_r = d_b cublasSafeCall(cublasTcopy(cublasHandle, M * M, d_b, 1, d_r, 1)); // --- d_r = d_r - d_Ax = d_b - d_Ax 
cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &T_MINUS_ONE, d_Ax, 1, d_r, 1)); // --- norm0 = ||d_r|| T norm0; cublasSafeCall(cublasTnrm2(cublasHandle, M * M, d_r, 1, &norm0)); // --- d_p = d_r cublasSafeCall(cublasTcopy(cublasHandle, M * M, d_r, 1, d_p, 1)); int numIter; for (numIter = 1; numIter <= maxIter; ++numIter) { // --- beta = <d_r, d_r> cublasSafeCall(cublasTdot(cublasHandle, M * M, d_r, 1, d_r, 1, &beta)); // --- d_h = Ap AxKernel<T> << <iDivUp(M * M, BLOCKSIZEMULTIPLY), BLOCKSIZEMULTIPLY >> >(M, d_p, d_h); // --- ph = <d_p, d_h> cublasSafeCall(cublasTdot(cublasHandle, M * M, d_p, 1, d_h, 1, &ph)); c = beta / ph; // --- d_x = d_x + c * d_p cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &c, d_p, 1, d_x, 1)); // --- d_r = d_r - c * d_h T minus_c = -c; cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &minus_c, d_h, 1, d_r, 1)); T norm; cublasSafeCall(cublasTnrm2(cublasHandle, M * M, d_r, 1, &norm)); if (norm <= tol * norm0) break; // --- rr = <d_r, d_r> T rr; cublasSafeCall(cublasTdot(cublasHandle, M * M, d_r, 1, d_r, 1, &rr)); beta = rr / beta; // --- d_p = beta * d_p cublasSafeCall(cublasTscal(cublasHandle, M * M, &beta, d_p, 1)); cublasSafeCall(cublasTaxpy(cublasHandle, M * M, &T_ONE, d_r, 1, d_p, 1)); } gpuErrchk(cudaFree(d_p)); gpuErrchk(cudaFree(d_r)); gpuErrchk(cudaFree(d_h)); gpuErrchk(cudaFree(d_Ax)); gpuErrchk(cudaFree(d_q)); return numIter; } /*************************************/ /* SAVE FLOAT ARRAY FROM GPU TO FILE */ /*************************************/ template <class T> void saveGPUrealtxt(const T * d_in, const char *filename, const int M) { T *h_in = (T *)malloc(M * sizeof(T)); gpuErrchk(cudaMemcpy(h_in, d_in, M * sizeof(T), cudaMemcpyDeviceToHost)); std::ofstream outfile; outfile.open(filename); for (int i = 0; i < M; i++) outfile << std::setprecision(prec_save) << h_in[i] << "\n"; outfile.close(); } /********/ /* MAIN */ /********/ int main() { cublasHandle_t cublasHandle; cublasCreate(&cublasHandle); const int M = 128; // --- 
Number of discretization points along d_u and y const int maxIter = 10000; // --- Maximum number of iterations const double tol = 0.0000001; // --- Conjugate gradient convergence tol // --- Equation right-hand side double *d_f; gpuErrchk(cudaMalloc(&d_f, (M * M) * sizeof(double))); deviceMemset<double>(d_f, (double)1.0, M * M); // --- Equation unknown double *d_u; gpuErrchk(cudaMalloc(&d_u, (M *M) * sizeof(double))); int numIter = conjugateGradientPoisson<double>(cublasHandle, M, d_f, d_u, maxIter, tol); cout << "Number of performed iterations performed " << numIter << endl; saveGPUrealtxt(d_u, ".\\d_result_x.txt", M * M); saveGPUrealtxt(d_f, ".\\d_result_b.txt", M * M); }
3c7b1dfa14c88185da45bdf706005e542ea3e575.hip
// !!! This is a file automatically generated by hipify!!! /*--------------------------------------------------------------------------*/ // A GPU-accelerated Adaptive Non-Local Means Filter for Denoising 3D Monte // Carlo Photon Transport Simulations // filterGPU_s.cu is the version of the single ANLM filter (B+Opt2+Opt3) /*--------------------------------------------------------------------------*/ // Yaoshen Yuan - yuan.yaos at husky.neu.edu // Qianqian Fang - q.fang at neu.edu // Computational Optics & Translational Imaging Lab // Northeastern University // Publication: // Yaoshen Yuan, Leiming Yu, Zafer Dogan, and Qianqian Fang, "Graphics processing // units-accelerated adaptive nonlocal means filter for denoising three-dimensional // Monte Carlo photon transport simulations," J. of Biomedical Optics, 23(12), 121618 (2018). // https://doi.org/10.1117/1.JBO.23.12.121618 // Copyright (C) 2018 Yaoshen Yuan, Qianqian Fang #include <stdio.h> #include <math.h> /* floor */ #include "filterGPU.h" #include <time.h> #include <hip/hip_runtime.h> #define CUDA_ASSERT(a) cuda_assess((a),__FILE__,__LINE__) void cuda_assess(hipError_t cuerr,const char *file, const int linenum); __constant__ FilterParam gcfg[1]; texture<float,hipTextureType3D,hipReadModeElementType> ima_tex; texture<float,hipTextureType3D,hipReadModeElementType> means_tex; texture<float,hipTextureType3D,hipReadModeElementType> variances_tex; texture<float,hipTextureType3D,hipReadModeElementType> R_tex; __device__ inline static float * distance(float *d, int x,int y,int z,int sx,int sy,int sz, float *ima_space) { // d=distance(ima,i,j,k,ni,nj,nk,f,cols,rows,slices); /* ima: the unfiltered image. medias: the image filtered by 3x3 box filter. x, y, z: the location of the "center of the local patch" in shared memory. nx, ny, nz: the location of the "center of the non-local patch" in the full-image. sx, sy, sz: the location of the "center of the non-local patch" in shared memory f: patch size. 
gcfg->dimx, gcfg->dimy, gcfg->dimz: the size of the image. */ float dt,distancetotal; int i,j,k,ni1,nj1,nk1,ni4,nj4,nk4,f1; f1=gcfg->patchsize; distancetotal=0.f; for(k=-f1;k<=f1;k++) { nk1=z+k; // local in shared memory nk4=sz+k; // non-local in shared memory for(j=-f1;j<=f1;j++) { nj1=y+j; nj4=sy+j; for(i=-f1;i<=f1;i++) { ni1=x+i; ni4=sx+i; d[0] = ima_space[nk1*gcfg->sharedSlice+nj1*gcfg->sharedwidth_x+ni1]-ima_space[nk4*gcfg->sharedSlice+nj4*gcfg->sharedwidth_x+ni4]; dt = d[0]*d[0]; distancetotal = distancetotal + dt; } } } d[0]=distancetotal*gcfg->rpatchnomalize; return d; } __device__ inline static float * distance2(float *d, int x,int y,int z, int fx, int fy, int fz, int nx,int ny,int nz, int sx, int sy, int sz, float *ima_space) { // local in shared local in full image non-local in full image non-local in shared // d=distance2(ima,means,i,j,k,ni,nj,nk,f,cols,rows,slices); /* ima: the unfiltered image. medias: the image filtered by 3x3 box filter. x, y, z: the location of the "center of the local patch" in shared memory. fx, fy, fz: the location of the "center of the local patch" in full image. nx, ny, nz: the location of the "center of the non-local patch" in the full-image. sx, sy, sz: the location of the "center of the non-local patch" in shared memory f: patch size. gcfg->dimx, gcfg->dimy, gcfg->dimz: the size of the image. 
*/ float dt,distancetotal; int i,j,k,ni1,nj1,nk1,ni2,nj2,nk2,ni3,nj3,nk3,ni4,nj4,nk4,f1; f1=gcfg->patchsize; distancetotal=0; for(k=-f1;k<=f1;k++) // 1D { nk1=z+k; // local in shared memory nk2=nz+k; // non-local in full image nk3=fz+k; // local in full image nk4=sz+k; // non-local in shared memory for(j=-f1;j<=f1;j++) // 2D { nj1=y+j; nj2=ny+j; nj3=fy+j; nj4=sy+j; for(i=-f1;i<=f1;i++) // 3D { ni1=x+i; ni2=nx+i; ni3=fx+i; ni4=sx+i; // Load whole search area into shared memory d[0]=(ima_space[nk1*(gcfg->sharedSlice)+(nj1*gcfg->sharedwidth)+ni1]-tex3D(means_tex,ni3,nj3,nk3))-(ima_space[nk4*(gcfg->sharedSlice)+(nj4*gcfg->sharedwidth)+ni4]-tex3D(means_tex,ni2,nj2,nk2)); dt = d[0]*d[0]; distancetotal = distancetotal + dt; } } } d[0]=distancetotal*gcfg->rpatchnomalize; return d; } __global__ static void ANLMfilter(float *Estimate) { /* ima: the input unfiltered image. means: the mean value of ima by using 3x3 block filter. variance: the variance of ima by using 3x3 block filter. average: save the value of weighted summation for patch i. Estimate: save the sum of all the filtered values for each voxel. Label: save the count of how many filtered values are computed for each voxel. v: the searching area. rows, cols, slices: the size of the image (x, y, z). gcfg->maxval: MAXimum value of the image. */ // declare shared memory extern __shared__ float ima_space[]; // extern indicates the dynamic memory allocation. 
int i,j,k,rc,ii,jj,kk,ni,nj,nk,is,js,ks,istart,jstart,kstart,icount,jcount,kcount,threadIdx_x,threadIdx_y,threadIdx_z,i_Fl,j_Fl,k_Fl,i_fl,j_fl,k_fl,i_sl,j_sl,k_sl; float totalweight,t1,t1i,t2,w,distanciaminima,estimate,means_t,variances_t,ima_tt,means_tt,variances_tt; float d[2]; /* Parameters setting */ // const float pi = 3.14159265359f; const float mu1 = 0.95f; const float var1 = 0.5f; const float rmu1= 1.f/mu1; const float rvar1= 1.f/var1; rc=gcfg->dimy*gcfg->dimx; estimate = 0.0f; d[0]=0; d[1]=0; threadIdx_x = threadIdx.x; threadIdx_y = threadIdx.y; threadIdx_z = threadIdx.z; totalweight=0.0f; distanciaminima=100000000000000.f; i = blockIdx.x*gcfg->blockwidth+threadIdx_x; // The coordinate of local patch in the original image (image that does NOT includes the apron) j = blockIdx.y*gcfg->blockwidth+threadIdx_y; k = blockIdx.z*gcfg->blockwidth+threadIdx_z; i_Fl = i+gcfg->apronFull; // The coordinate of local patch in the super full-image (image that includes the apron+s) j_Fl = j+gcfg->apronFull; k_Fl = k+gcfg->apronFull; i_fl = i+gcfg->apron; // The coordinate of local patch in the full-image (image that includes the apron) j_fl = j+gcfg->apron; k_fl = k+gcfg->apron; i_sl = threadIdx_x+gcfg->apronShared; // The coordinate of local patch in the shared memory j_sl = threadIdx_y+gcfg->apronShared; k_sl = threadIdx_z+gcfg->apronShared; // return if the thread number exceeds the dimension if(i>=gcfg->dimx || j>=gcfg->dimy || k>=gcfg->dimz) return; if(threadIdx_z==0){ kstart = -gcfg->apronShared; kcount = gcfg->apronShared+1; } else if(threadIdx_z==gcfg->blockwidth-1 || k==gcfg->dimz-1){ kstart = 0; kcount = gcfg->apronShared+1; } else{ kstart = 0; kcount = 1; } if(threadIdx_y==0){ jstart = -gcfg->apronShared; jcount = gcfg->apronShared+1; } else if(threadIdx_y==gcfg->blockwidth-1 || j==gcfg->dimy-1){ jstart = 0; jcount = gcfg->apronShared+1; } else{ jstart = 0; jcount = 1; } if(threadIdx_x==0){ istart = -gcfg->apronShared; icount = gcfg->apronShared+1; } else 
if(threadIdx_x==gcfg->blockwidth-1 || i==gcfg->dimx-1){ istart = 0; icount = gcfg->apronShared+1; } else{ istart = 0; icount = 1; } /* special case */ if(threadIdx_x==0 && i==gcfg->dimx-1){ istart = -gcfg->apronShared; icount = 2*gcfg->apronShared+1; } if(threadIdx_y==0 && j==gcfg->dimy-1){ jstart = -gcfg->apronShared; jcount = 2*gcfg->apronShared+1; } if(threadIdx_z==0 && k==gcfg->dimz-1){ kstart = -gcfg->apronShared; kcount = 2*gcfg->apronShared+1; } for(ks=0;ks<kcount;ks++){ for(js=0;js<jcount;js++){ for(is=0;is<icount;is++){ // load the image data into shared memory ima_space[(k_sl+kstart+ks)*gcfg->sharedSlice+(j_sl+jstart+js)*gcfg->sharedwidth_x+(i_sl+istart+is)] = tex3D(ima_tex,i_Fl+istart+is,j_Fl+jstart+js,k_Fl+kstart+ks); } } } __syncthreads(); Estimate[k*rc+(j*gcfg->dimx)+i] = 0.0f; means_t = tex3D(means_tex,i_fl,j_fl,k_fl); variances_t = tex3D(variances_tex,i_fl,j_fl,k_fl); /* COMPUTE ADAPTIVE PARAMTER */ for(kk=-gcfg->searchsize; kk<=gcfg->searchsize; kk++) { nk=k_fl+kk; // here nk, ni, nj mean the coordinates of the central voxel of patch j for(jj=-gcfg->searchsize; jj<=gcfg->searchsize; jj++) { nj=j_fl+jj; for(ii=-gcfg->searchsize; ii<=gcfg->searchsize; ii++) { ni=i_fl+ii; if(ii==0 && jj==0 && kk==0) continue; // Skip the patch when i==j if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 && ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) { ima_tt = ima_space[(k_sl+kk)*(gcfg->sharedSlice)+((j_sl+jj)*gcfg->sharedwidth_x)+(i_sl+ii)]; means_tt = tex3D(means_tex,ni,nj,nk); variances_tt = tex3D(variances_tex,ni,nj,nk); // The purpose is to set the threshold to eliminate the patches (j) that are too far away from the patch i t1 = means_t/means_tt; t1i= (gcfg->maxval-means_t)/(gcfg->maxval-means_tt); t2 = (variances_t)/(variances_tt); if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1) { // d: save Euclidean distance; coordinates in shared memory; coordinates in full image. 
// distance2(d,i_sl,j_sl,k_sl,ni,nj,nk,i_sl+ii,j_sl+jj,k_sl+kk, ima_space); distance2(d,i_sl,j_sl,k_sl,i_fl,j_fl,k_fl,ni,nj,nk,i_sl+ii,j_sl+jj,k_sl+kk,ima_space); if(d[0]<distanciaminima) distanciaminima=d[0]; // Get the minimum distance in order to calculate the adaptive variance } } } } } if(distanciaminima==0) distanciaminima=1; /* FILTERING PROCESS */ if(gcfg->rician==0) // No rician noise { for(kk=-gcfg->searchsize; kk<=gcfg->searchsize; kk++) { nk=k_fl+kk; // here nk, ni, nj mean the coordinates of the central voxel of patch j for(jj=-gcfg->searchsize; jj<=gcfg->searchsize; jj++) { nj=j_fl+jj; for(ii=-gcfg->searchsize; ii<=gcfg->searchsize; ii++) { ni=i_fl+ii; if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 && ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) { ima_tt = ima_space[(k_sl+kk)*(gcfg->sharedSlice)+((j_sl+jj)*gcfg->sharedwidth_x)+(i_sl+ii)]; means_tt = tex3D(means_tex,ni,nj,nk); variances_tt = tex3D(variances_tex,ni,nj,nk); t1 = (means_t)/(means_tt); t1i= (gcfg->maxval-means_t)/(gcfg->maxval-means_tt); t2 = (variances_t)/(variances_tt); if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1) { distance(d,i_sl,j_sl,k_sl,i_sl+ii,j_sl+jj,k_sl+kk, ima_space); w = expf(-d[0]/distanciaminima); estimate = estimate + w*ima_tt; totalweight = totalweight + w; } } } } } estimate = estimate/totalweight; } else // Consider rician noise { for(kk=-gcfg->searchsize; kk<=gcfg->searchsize; kk++) { nk=k_fl+kk; // here nk, ni, nj mean the coordinates of the central voxel of patch j for(jj=-gcfg->searchsize; jj<=gcfg->searchsize; jj++) { nj=j_fl+jj; for(ii=-gcfg->searchsize; ii<=gcfg->searchsize; ii++) { ni=i_fl+ii; if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 && ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) { ima_tt = ima_space[(k_sl+kk)*(gcfg->sharedSlice)+((j_sl+jj)*gcfg->sharedwidth_x)+(i_sl+ii)]; means_tt = tex3D(means_tex,ni,nj,nk); 
variances_tt = tex3D(variances_tex,ni,nj,nk); t1 = (means_t)/(means_tt); t1i= (gcfg->maxval-means_t)/(gcfg->maxval-means_tt); t2 = (variances_t)/(variances_tt); if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1) { distance(d,i_sl,j_sl,k_sl,i_sl+ii,j_sl+jj,k_sl+kk,ima_space); w = expf(-d[0]/distanciaminima); estimate = estimate + w*ima_tt*ima_tt; totalweight = totalweight + w; } } } } } estimate = estimate/totalweight; estimate = estimate-2.0f*distanciaminima; estimate = (estimate>0.0f)?estimate:0.0f; estimate = sqrtf(estimate); } Estimate[k*rc+(j*gcfg->dimx)+i] = estimate; __syncthreads(); } __global__ static void preProcess(hipPitchedPtr mean, hipPitchedPtr R, hipPitchedPtr var, int dimfull_x, int dimfull_y, int dimfull_z, int s, int blockwidth) { extern __shared__ float ima_shared[]; int sharedwidthSlice, sharedwidth, threadIdx_x, threadIdx_y, threadIdx_z, istart, jstart, kstart, icount, jcount, kcount, i, j, k, ii, jj, kk, i_fl, j_fl, k_fl, i_sl, j_sl, k_sl, is, js, ks; int N = (2*s+1)*(2*s+1)*(2*s+1); // size of the filter box sharedwidth = blockwidth+2*s; sharedwidthSlice = sharedwidth*sharedwidth; threadIdx_x = threadIdx.x; threadIdx_y = threadIdx.y; threadIdx_z = threadIdx.z; i = blockIdx.x*blockwidth+threadIdx_x; // The coordinate of local patch in the original image (image that does NOT includes the apron) j = blockIdx.y*blockwidth+threadIdx_y; k = blockIdx.z*blockwidth+threadIdx_z; i_fl = i+s; // The coordinate of local patch in the full-image (image that includes the apron) j_fl = j+s; k_fl = k+s; i_sl = threadIdx_x+s; // The coordinate of local patch in the shared memory j_sl = threadIdx_y+s; k_sl = threadIdx_z+s; // return if the thread number exceeds the dimension if(i>=dimfull_x || j>=dimfull_y || k>=dimfull_z) return; /* general case */ if(threadIdx_z==blockwidth-1 || k==dimfull_z-1){ kstart = 0; kcount = s+1; } else if(threadIdx_z==0){ kstart = -s; kcount = s+1; } else{ kstart = 0; kcount = 1; } if(threadIdx_y==blockwidth-1 || 
j==dimfull_y-1){ jstart = 0; jcount = s+1; } else if(threadIdx_y==0){ jstart = -s; jcount = s+1; } else{ jstart = 0; jcount = 1; } if(threadIdx_x==blockwidth-1 || i==dimfull_x-1){ istart = 0; icount = s+1; } else if(threadIdx_x==0){ istart = -s; icount = s+1; } else{ istart = 0; icount = 1; } /* special case */ if(threadIdx_x==0 && i==dimfull_x-1){ istart = -s; icount = 2*s+1; } if(threadIdx_y==0 && j==dimfull_y-1){ jstart = -s; jcount = 2*s+1; } if(threadIdx_z==0 && k==dimfull_z-1){ kstart = -s; kcount = 2*s+1; } for(ks=0;ks<kcount;ks++){ for(js=0;js<jcount;js++){ for(is=0;is<icount;is++){ // load the image data into shared memory ima_shared[(k_sl+kstart+ks)*sharedwidthSlice+(j_sl+jstart+js)*sharedwidth+(i_sl+istart+is)] = tex3D(ima_tex,i_fl+istart+is,j_fl+jstart+js,k_fl+kstart+ks); } } } __syncthreads(); float Mt, Vt=0; // mean matrix char* Ptr = (char *) mean.ptr; // location in mean size_t pitch = mean.pitch; // dimx size_t slicePitch = pitch*dimfull_y; // dimx*dimy char* slice = Ptr+k*slicePitch; float* row = (float *) (slice+j*pitch); char* Ptr2 = (char *) R.ptr; // location in R char* slice2 = Ptr2+k*slicePitch; float* row2 = (float *) (slice2+j*pitch); Mt = 0; for(kk=-s; kk<=s; kk++) for(jj=-s;jj<=s;jj++) for(ii=-s;ii<=s;ii++) { Mt = Mt+ima_shared[(k_sl+kk)*sharedwidthSlice+(j_sl+jj)*sharedwidth+(i_sl+ii)]; } Mt = Mt/N; // mean value for voxel at (i,j,k) row[i] = Mt; // Save mean value into global mean matrix row2[i] = ima_shared[k_sl*sharedwidthSlice+j_sl*sharedwidth+i_sl]-Mt; // Save R (image-mean) value into global R memory // __syncthreads(); char* Ptr3 = (char *) var.ptr; // location in var char* slice3 = Ptr3+k*slicePitch; float* row3 = (float *) (slice3+j*pitch); float t = 0; for(kk=-s; kk<=s; kk++) for(jj=-s;jj<=s;jj++) for(ii=-s;ii<=s;ii++) { t = ima_shared[(k_sl+kk)*sharedwidthSlice+(j_sl+jj)*sharedwidth+(i_sl+ii)]-Mt; Vt = Vt+t*t; } Vt = Vt/(N-1); // variance value for voxel at (i,j,k) row3[i] = Vt; // Save variance value into global var matrix 
__syncthreads(); } void runFilter_s(float * ima_input, float * Estimate1, int f1, int v, int dimx, int dimy, int dimz, float MAX, int width2, int width, int s, int gpuid, bool rician) { /* ima_input: input image Estimate1: output image f1: patch size for the first filtering v: searching area dimx, dimy, dimz: the size of each dimension of the image MAX: max value of the input image width2: block width width: pre-process blockwidth s: patch radius for computing mean, R, variance matrix gpuid: GPU id */ int nDevices; hipGetDeviceCount(&nDevices); printf("====== Designed for single filtering ======\n"); printf("Baseline+Opt2+Opt3\n"); hipDeviceProp_t prop; hipGetDeviceProperties(&prop,gpuid); printf("Device name: %s\n",prop.name); //@@@@@@@@@@@@ Pre-processing kernel starts @@@@@@@@@@@@/ int dimPrecom_x, dimPrecom_y, dimPrecom_z, dimfull_x, dimfull_y, dimfull_z, widthPrecom, blockPrecom_x, blockPrecom_y, blockPrecom_z, sharedsizePre; hipArray *ima=0, *meansArray=0, *variancesArray=0, *RArray=0; hipEvent_t start, stop; float elapsedTime; widthPrecom = width; dimPrecom_x = dimx+2*(f1+v+s); // Size of the input image dimPrecom_y = dimy+2*(f1+v+s); dimPrecom_z = dimz+2*(f1+v+s); dimfull_x = dimx+2*(f1+v); // Size of the mean, R and variance matrix dimfull_y = dimy+2*(f1+v); dimfull_z = dimz+2*(f1+v); printf("dim_x=%d\tdim_y=%d\tdim_z=%d\n",dimx,dimy,dimz); // const int sizePre = (dimfull_x*dimfull_y*dimfull_z)*sizeof(float); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipExtent imaSize = make_hipExtent(dimPrecom_x,dimPrecom_y,dimPrecom_z); // Load ima_input into texture memory // hipMalloc3DArray allocate hipArray which is only for texture memory hipMalloc3DArray(&ima, &channelDesc, imaSize); hipMemcpy3DParms copyParams1 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParams1.srcPtr = make_hipPitchedPtr((void*)ima_input, 
imaSize.width*sizeof(float), imaSize.width, imaSize.height); copyParams1.dstArray = ima; // destination array copyParams1.extent = imaSize; // dimensions of the transferred area in elements copyParams1.kind = hipMemcpyHostToDevice; // copy from host to device hipMemcpy3D(&copyParams1); ima_tex.normalized = false; ima_tex.filterMode = hipFilterModePoint; //hipFilterModePoint; hipFilterModeLinear; ima_tex.addressMode[0] = hipAddressModeClamp; ima_tex.addressMode[1] = hipAddressModeClamp; ima_tex.addressMode[2] = hipAddressModeClamp; hipBindTextureToArray(ima_tex, ima, channelDesc); // Allocate global memory for mean, R and variance hipExtent imaApron = make_hipExtent(dimfull_x*sizeof(float),dimfull_y,dimfull_z); hipPitchedPtr mean, R, var; // hipMalloc3D allocate global memory hipMalloc3D(&mean, imaApron); hipMalloc3D(&R, imaApron); hipMalloc3D(&var, imaApron); sharedsizePre = (widthPrecom+2*s)*(widthPrecom+2*s)*(widthPrecom+2*s); blockPrecom_x = (dimfull_x+widthPrecom-1)/widthPrecom; blockPrecom_y = (dimfull_y+widthPrecom-1)/widthPrecom; blockPrecom_z = (dimfull_z+widthPrecom-1)/widthPrecom; printf("shared size for pre-computation=%fkB\n",float(sharedsizePre*sizeof(float))/1024); if(sharedsizePre*sizeof(float)>48*1024){ printf("The memory requirement for pre-computation is larger than the size of shared memory!"); exit(1); } //@@@@@@@@@@ Pre-processing kernel time start @@@@@@@@@@/ CUDA_ASSERT(hipEventCreate(&start)); CUDA_ASSERT(hipEventRecord(start,0)); dim3 dimBlockPre(widthPrecom,widthPrecom,widthPrecom); dim3 dimGridPre(blockPrecom_x,blockPrecom_y,blockPrecom_z); // preProcess(hipPitchedPtr mean, hipPitchedPtr R, hipPitchedPtr var, int dimfull_x, int dimfull_y, int dimfull_z, int s, int blockwidth) hipLaunchKernelGGL(( preProcess), dim3(dimGridPre),dim3(dimBlockPre),sharedsizePre*sizeof(float), 0, mean, R, var, dimfull_x, dimfull_y, dimfull_z, s, widthPrecom); // preProcess<<<dimGridPre,dimBlockPre>>>(f1); CUDA_ASSERT(hipDeviceSynchronize()); // Synchronize 
until all mean, R and variance are finished CUDA_ASSERT(hipEventCreate(&stop)); CUDA_ASSERT(hipEventRecord(stop,0)); CUDA_ASSERT(hipEventSynchronize(stop)); CUDA_ASSERT(hipEventElapsedTime(&elapsedTime,start,stop)); printf("Pre-computation kernel time: %f ms\n" ,elapsedTime); //@@@@@@@@@@ Pre-processing kernel time end @@@@@@@@@@/ /* //@@@@@@@@@@ Copy from device to host start @@@@@@@@@@/ hipExtent imaOut = make_hipExtent(dimfull_x,dimfull_y,dimfull_z); hipMemcpy3DParms copyParamsOutput1 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParamsOutput1.srcPtr = mean; copyParamsOutput1.dstPtr = make_hipPitchedPtr((void*)Estimate1, imaOut.width*sizeof(float), imaOut.width, imaOut.height); copyParamsOutput1.extent = imaApron; // dimensions of the transferred area in elements copyParamsOutput1.kind = hipMemcpyDeviceToHost; // copy from host to device CUDA_ASSERT(hipMemcpy3D(&copyParamsOutput1)); hipMemcpy3DParms copyParamsOutput2 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParamsOutput2.srcPtr = var; copyParamsOutput2.dstPtr = make_hipPitchedPtr((void*)Estimate2, imaOut.width*sizeof(float), imaOut.width, imaOut.height); copyParamsOutput2.extent = imaApron; // dimensions of the transferred area in elements copyParamsOutput2.kind = hipMemcpyDeviceToHost; // copy from host to device CUDA_ASSERT(hipMemcpy3D(&copyParamsOutput2)); //@@@@@@@@@@ Copy from device to host end @@@@@@@@@@/ */ //@@@@@@@@@@@@ 1st kernel finished. 
Binding to texture memory starts @@@@@@@@@@@@/ CUDA_ASSERT(hipEventCreate(&start)); CUDA_ASSERT(hipEventRecord(start,0)); //@@@@@@@@@@@@ Binding pre-computed mean, R and variance to texture memory start @@@@@@@@@@@@/ hipExtent Size = make_hipExtent(dimfull_x,dimfull_y,dimfull_z); // mean hipMalloc3DArray(&meansArray, &channelDesc, Size); hipMemcpy3DParms copyParams2 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParams2.srcPtr = mean; copyParams2.dstArray = meansArray; // destination array copyParams2.extent = Size; // dimensions of the transferred area in elements copyParams2.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams2); means_tex.normalized = false; means_tex.filterMode = hipFilterModePoint; //hipFilterModePoint; hipFilterModeLinear; means_tex.addressMode[0] = hipAddressModeClamp; means_tex.addressMode[1] = hipAddressModeClamp; means_tex.addressMode[2] = hipAddressModeClamp; hipBindTextureToArray(means_tex, meansArray, channelDesc); // R hipMalloc3DArray(&RArray, &channelDesc, Size); hipMemcpy3DParms copyParams3 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParams3.srcPtr = R; copyParams3.dstArray = RArray; // destination array copyParams3.extent = Size; // dimensions of the transferred area in elements copyParams3.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams3); R_tex.normalized = false; R_tex.filterMode = hipFilterModePoint; //hipFilterModePoint; hipFilterModeLinear; R_tex.addressMode[0] = hipAddressModeClamp; R_tex.addressMode[1] = hipAddressModeClamp; R_tex.addressMode[2] = hipAddressModeClamp; hipBindTextureToArray(R_tex, RArray, channelDesc); // variance hipMalloc3DArray(&variancesArray, &channelDesc, Size); hipMemcpy3DParms copyParams4 = {0}; // make_hipPitchedPtr returns hipPitchedPtr{pitch(pitch of the 
pointer),ptr(pointer to the allocated mem),xsize,ysize (logical width and height)} copyParams4.srcPtr = var; copyParams4.dstArray = variancesArray; // destination array copyParams4.extent = Size; // dimensions of the transferred area in elements copyParams4.kind = hipMemcpyDeviceToDevice; hipMemcpy3D(&copyParams4); variances_tex.normalized = false; variances_tex.filterMode = hipFilterModePoint; //hipFilterModePoint; hipFilterModeLinear; variances_tex.addressMode[0] = hipAddressModeClamp; variances_tex.addressMode[1] = hipAddressModeClamp; variances_tex.addressMode[2] = hipAddressModeClamp; hipBindTextureToArray(variances_tex, variancesArray, channelDesc); CUDA_ASSERT(hipEventCreate(&stop)); CUDA_ASSERT(hipEventRecord(stop,0)); CUDA_ASSERT(hipEventSynchronize(stop)); CUDA_ASSERT(hipEventElapsedTime(&elapsedTime, start,stop)); printf("Elapsed time of texture binding: %f ms\n" ,elapsedTime); //@@@@@@@@@@@@ Binding pre-computed mean, R and variance to texture memory end @@@@@@@@@@@@/ //@@@@@@@@@@@@ Filtering process starts @@@@@@@@@@@@/ FilterParam param={0,v,0.f,dimx,dimy,dimz,0,0,0,MAX,width,0,0,0,0,0,0,rician}; float *EstimateKernel; int Ndim,sharedsize; // The total size of the input image Ndim = dimx*dimy*dimz; const int size = Ndim*sizeof(float); CUDA_ASSERT(hipSetDevice(gpuid)); CUDA_ASSERT(hipMalloc((void**)&EstimateKernel, size)); // copy from EstimateKernel (device) to Estimate (host) //&&&&&&&&&& Parameter setup for the 1st filtering (large) &&&&&&&&&&/ width = width2; sharedsize = (width+2*(f1+v))*(width+2*(f1+v))*(width+2*(f1+v)); // The shared memory size needed for each block printf("Shared size for filtering=%fkB\n",float(sharedsize*sizeof(float))/1024); printf("width=%d\n",width); if(width*width*width>1024){ printf("Error: The number of threads in a block is larger than 1024!"); exit(1); } param.patchsize = f1; param.rpatchnomalize = 1.f/((f1<<1)+1); param.rpatchnomalize = param.rpatchnomalize*param.rpatchnomalize*param.rpatchnomalize; param.blockdimx 
= (dimx+width-1)/width; param.blockdimy = (dimy+width-1)/width; param.blockdimz = (dimy+width-1)/width; param.blockwidth = width; param.sharedwidth_x = width+2*(f1+v); param.sharedwidth = width+2*(f1+v); // The shared width at other dimension is still not changed. param.sharedSlice = param.sharedwidth_x*param.sharedwidth; param.apron = f1+v; param.apronFull = f1+v+s; param.apronShared = f1+v; CUDA_ASSERT(hipMemcpyToSymbol(gcfg, &param, sizeof(FilterParam), 0, hipMemcpyHostToDevice)); printf("1st: searchsize=%d\n", param.searchsize); printf("1st: patchsize=%d\n", param.patchsize); //@@@@@@@@@@ 1st filtering (large): Time for filtering kernel start @@@@@@@@@@/ CUDA_ASSERT(hipEventCreate(&start)); CUDA_ASSERT(hipEventRecord(start,0)); dim3 dimBlock(width,width,width); dim3 dimGrid(param.blockdimx,param.blockdimy,param.blockdimz); // First filtering hipLaunchKernelGGL(( ANLMfilter), dim3(dimGrid), dim3(dimBlock),sharedsize*sizeof(float), 0, EstimateKernel); CUDA_ASSERT(hipDeviceSynchronize()); CUDA_ASSERT(hipEventCreate(&stop)); CUDA_ASSERT(hipEventRecord(stop,0)); CUDA_ASSERT(hipEventSynchronize(stop)); CUDA_ASSERT(hipEventElapsedTime(&elapsedTime, start,stop)); printf("1st filtering (large) kernel time: %f ms\n\n\n" ,elapsedTime); //@@@@@@@@@@ 1st filtering (large): Time for filtering kernel end @@@@@@@@@@/ //@@@@@@@@@@ Free memory @@@@@@@@@@/ CUDA_ASSERT(hipMemcpy(Estimate1, EstimateKernel, size, hipMemcpyDeviceToHost)); CUDA_ASSERT(hipFree(EstimateKernel)); // *ima=0, *means_Tex=0, *variances_Tex=0, *R_Tex=0; CUDA_ASSERT(hipFreeArray(meansArray)); CUDA_ASSERT(hipFreeArray(variancesArray)); CUDA_ASSERT(hipFreeArray(RArray)); CUDA_ASSERT(hipFreeArray(ima)); CUDA_ASSERT(hipFree(mean.ptr)); CUDA_ASSERT(hipFree(R.ptr)); CUDA_ASSERT(hipFree(var.ptr)); CUDA_ASSERT(hipDeviceReset()); }
3c7b1dfa14c88185da45bdf706005e542ea3e575.cu
/*--------------------------------------------------------------------------*/
//  A GPU-accelerated Adaptive Non-Local Means Filter for Denoising 3D Monte
//  Carlo Photon Transport Simulations
//  filterGPU_s.cu is the version of the single ANLM filter (B+Opt2+Opt3)
/*--------------------------------------------------------------------------*/
//  Yaoshen Yuan - yuan.yaos at husky.neu.edu
//  Qianqian Fang - q.fang at neu.edu
//  Computational Optics & Translational Imaging Lab
//  Northeastern University
//  Publication:
//  Yaoshen Yuan, Leiming Yu, Zafer Dogan, and Qianqian Fang, "Graphics processing
//  units-accelerated adaptive nonlocal means filter for denoising three-dimensional
//  Monte Carlo photon transport simulations," J. of Biomedical Optics, 23(12), 121618 (2018).
//  https://doi.org/10.1117/1.JBO.23.12.121618
//  Copyright (C) 2018 Yaoshen Yuan, Qianqian Fang

#include <stdio.h>
#include <math.h>               /* floor */
#include "filterGPU.h"
#include <time.h>
#include <cuda.h>

#define CUDA_ASSERT(a) cuda_assess((a),__FILE__,__LINE__)

void cuda_assess(cudaError_t cuerr, const char *file, const int linenum);

// Filter configuration, uploaded once per launch with cudaMemcpyToSymbol.
__constant__ FilterParam gcfg[1];

// Legacy texture references (point-sampled, clamped) for the padded input
// image and the pre-computed per-voxel mean / residual / variance volumes.
texture<float,cudaTextureType3D,cudaReadModeElementType> ima_tex;
texture<float,cudaTextureType3D,cudaReadModeElementType> means_tex;
texture<float,cudaTextureType3D,cudaReadModeElementType> variances_tex;
texture<float,cudaTextureType3D,cudaReadModeElementType> R_tex;

/**
 * Squared patch distance between two patches, both resident in shared memory.
 *
 * (x,y,z)    - center of the local patch, shared-memory coordinates
 * (sx,sy,sz) - center of the non-local patch, shared-memory coordinates
 * ima_space  - shared-memory tile holding the block's search region
 *
 * The accumulated squared difference over the (2f+1)^3 patch is normalized by
 * gcfg->rpatchnomalize and returned in d[0]; d is also returned for chaining.
 */
__device__ inline static float * distance(float *d, int x, int y, int z,
                                          int sx, int sy, int sz, float *ima_space)
{
	float dt, distancetotal;
	int i, j, k, ni1, nj1, nk1, ni4, nj4, nk4, f1;

	f1 = gcfg->patchsize;
	distancetotal = 0.f;
	for(k = -f1; k <= f1; k++) {
		nk1 = z + k;              // local patch, shared memory
		nk4 = sz + k;             // non-local patch, shared memory
		for(j = -f1; j <= f1; j++) {
			nj1 = y + j;
			nj4 = sy + j;
			for(i = -f1; i <= f1; i++) {
				ni1 = x + i;
				ni4 = sx + i;
				d[0] = ima_space[nk1*gcfg->sharedSlice + nj1*gcfg->sharedwidth_x + ni1]
				     - ima_space[nk4*gcfg->sharedSlice + nj4*gcfg->sharedwidth_x + ni4];
				dt = d[0]*d[0];
				distancetotal = distancetotal + dt;
			}
		}
	}
	d[0] = distancetotal * gcfg->rpatchnomalize;
	return d;
}

/**
 * Mean-subtracted squared patch distance (used when estimating the adaptive
 * noise parameter): each voxel has the local patch mean (means_tex, sampled
 * in full-image coordinates) removed before differencing.
 *
 * (x,y,z)    - local patch center, shared-memory coordinates
 * (fx,fy,fz) - local patch center, full-image coordinates
 * (nx,ny,nz) - non-local patch center, full-image coordinates
 * (sx,sy,sz) - non-local patch center, shared-memory coordinates
 *
 * Result is written to d[0] (normalized by gcfg->rpatchnomalize).
 * Note: indexes shared memory with gcfg->sharedwidth, which runFilter_s sets
 * equal to gcfg->sharedwidth_x, so this matches distance().
 */
__device__ inline static float * distance2(float *d, int x, int y, int z,
                                           int fx, int fy, int fz,
                                           int nx, int ny, int nz,
                                           int sx, int sy, int sz, float *ima_space)
{
	float dt, distancetotal;
	int i, j, k, ni1, nj1, nk1, ni2, nj2, nk2, ni3, nj3, nk3, ni4, nj4, nk4, f1;

	f1 = gcfg->patchsize;
	distancetotal = 0;
	for(k = -f1; k <= f1; k++) {
		nk1 = z + k;              // local, shared memory
		nk2 = nz + k;             // non-local, full image
		nk3 = fz + k;             // local, full image
		nk4 = sz + k;             // non-local, shared memory
		for(j = -f1; j <= f1; j++) {
			nj1 = y + j; nj2 = ny + j; nj3 = fy + j; nj4 = sy + j;
			for(i = -f1; i <= f1; i++) {
				ni1 = x + i; ni2 = nx + i; ni3 = fx + i; ni4 = sx + i;
				d[0] = (ima_space[nk1*(gcfg->sharedSlice) + (nj1*gcfg->sharedwidth) + ni1] - tex3D(means_tex,ni3,nj3,nk3))
				     - (ima_space[nk4*(gcfg->sharedSlice) + (nj4*gcfg->sharedwidth) + ni4] - tex3D(means_tex,ni2,nj2,nk2));
				dt = d[0]*d[0];
				distancetotal = distancetotal + dt;
			}
		}
	}
	d[0] = distancetotal * gcfg->rpatchnomalize;
	return d;
}

/**
 * Adaptive NLM filtering kernel.
 *
 * Launch layout: one 3D thread per output voxel, blockDim =
 * (blockwidth)^3, grid sized to cover (dimx,dimy,dimz).  Dynamic shared
 * memory must hold (blockwidth + 2*apronShared)^3 floats (the block's search
 * region, staged from ima_tex).
 *
 * Estimate - device output buffer, dimx*dimy*dimz floats.
 *
 * Each thread (1) cooperatively loads the shared tile, (2) scans the search
 * window to find the minimum mean-subtracted patch distance, which serves as
 * the adaptive noise variance, then (3) computes the weighted patch average;
 * for Rician noise (gcfg->rician != 0) the second moment is averaged and
 * bias-corrected before taking the square root.
 */
__global__ static void ANLMfilter(float *Estimate)
{
	extern __shared__ float ima_space[];    // dynamically sized search-region tile
	int i, j, k, rc, ii, jj, kk, ni, nj, nk, is, js, ks,
	    istart, jstart, kstart, icount, jcount, kcount,
	    threadIdx_x, threadIdx_y, threadIdx_z,
	    i_Fl, j_Fl, k_Fl, i_fl, j_fl, k_fl, i_sl, j_sl, k_sl;
	float totalweight, t1, t1i, t2, w, distanciaminima, estimate,
	      means_t, variances_t, ima_tt, means_tt, variances_tt;
	float d[2];

	/* patch pre-selection thresholds (mean ratio and variance ratio) */
	const float mu1  = 0.95f;
	const float var1 = 0.5f;
	const float rmu1 = 1.f/mu1;
	const float rvar1= 1.f/var1;

	rc = gcfg->dimy * gcfg->dimx;
	estimate = 0.0f;
	d[0] = 0; d[1] = 0;
	threadIdx_x = threadIdx.x;
	threadIdx_y = threadIdx.y;
	threadIdx_z = threadIdx.z;
	totalweight = 0.0f;
	distanciaminima = 100000000000000.f;

	i = blockIdx.x*gcfg->blockwidth + threadIdx_x;  // coordinates in the un-padded image
	j = blockIdx.y*gcfg->blockwidth + threadIdx_y;
	k = blockIdx.z*gcfg->blockwidth + threadIdx_z;
	i_Fl = i + gcfg->apronFull;                     // coordinates in the super full image (apron+s padding)
	j_Fl = j + gcfg->apronFull;
	k_Fl = k + gcfg->apronFull;
	i_fl = i + gcfg->apron;                         // coordinates in the full image (apron padding)
	j_fl = j + gcfg->apron;
	k_fl = k + gcfg->apron;
	i_sl = threadIdx_x + gcfg->apronShared;         // coordinates in the shared tile
	j_sl = threadIdx_y + gcfg->apronShared;
	k_sl = threadIdx_z + gcfg->apronShared;

	// drop out-of-range threads (grid may overhang the volume)
	if(i >= gcfg->dimx || j >= gcfg->dimy || k >= gcfg->dimz)
		return;

	/* decide how many apron voxels this thread loads along each axis */
	if(threadIdx_z==0)                                         { kstart = -gcfg->apronShared; kcount = gcfg->apronShared+1; }
	else if(threadIdx_z==gcfg->blockwidth-1 || k==gcfg->dimz-1){ kstart = 0;                  kcount = gcfg->apronShared+1; }
	else                                                       { kstart = 0;                  kcount = 1; }
	if(threadIdx_y==0)                                         { jstart = -gcfg->apronShared; jcount = gcfg->apronShared+1; }
	else if(threadIdx_y==gcfg->blockwidth-1 || j==gcfg->dimy-1){ jstart = 0;                  jcount = gcfg->apronShared+1; }
	else                                                       { jstart = 0;                  jcount = 1; }
	if(threadIdx_x==0)                                         { istart = -gcfg->apronShared; icount = gcfg->apronShared+1; }
	else if(threadIdx_x==gcfg->blockwidth-1 || i==gcfg->dimx-1){ istart = 0;                  icount = gcfg->apronShared+1; }
	else                                                       { istart = 0;                  icount = 1; }

	/* special case: a thread that is both first in the block and last in the
	 * volume must load both aprons */
	if(threadIdx_x==0 && i==gcfg->dimx-1){ istart = -gcfg->apronShared; icount = 2*gcfg->apronShared+1; }
	if(threadIdx_y==0 && j==gcfg->dimy-1){ jstart = -gcfg->apronShared; jcount = 2*gcfg->apronShared+1; }
	if(threadIdx_z==0 && k==gcfg->dimz-1){ kstart = -gcfg->apronShared; kcount = 2*gcfg->apronShared+1; }

	for(ks = 0; ks < kcount; ks++)
		for(js = 0; js < jcount; js++)
			for(is = 0; is < icount; is++)
				// stage the search region into shared memory
				ima_space[(k_sl+kstart+ks)*gcfg->sharedSlice + (j_sl+jstart+js)*gcfg->sharedwidth_x + (i_sl+istart+is)]
					= tex3D(ima_tex, i_Fl+istart+is, j_Fl+jstart+js, k_Fl+kstart+ks);
	__syncthreads();

	Estimate[k*rc + (j*gcfg->dimx) + i] = 0.0f;
	means_t     = tex3D(means_tex,     i_fl, j_fl, k_fl);
	variances_t = tex3D(variances_tex, i_fl, j_fl, k_fl);

	/* COMPUTE ADAPTIVE PARAMETER: minimum mean-subtracted patch distance over
	 * the search window (excluding the center patch itself) */
	for(kk = -gcfg->searchsize; kk <= gcfg->searchsize; kk++) {
		nk = k_fl + kk;           // (ni,nj,nk): center of candidate patch j, full-image coords
		for(jj = -gcfg->searchsize; jj <= gcfg->searchsize; jj++) {
			nj = j_fl + jj;
			for(ii = -gcfg->searchsize; ii <= gcfg->searchsize; ii++) {
				ni = i_fl + ii;
				if(ii==0 && jj==0 && kk==0)
					continue;     // skip the patch when i==j
				if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 &&
				   ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) {
					ima_tt       = ima_space[(k_sl+kk)*(gcfg->sharedSlice) + ((j_sl+jj)*gcfg->sharedwidth_x) + (i_sl+ii)];
					means_tt     = tex3D(means_tex,     ni, nj, nk);
					variances_tt = tex3D(variances_tex, ni, nj, nk);
					// pre-selection: discard patches whose local statistics differ too much
					t1  = means_t/means_tt;
					t1i = (gcfg->maxval-means_t)/(gcfg->maxval-means_tt);
					t2  = (variances_t)/(variances_tt);
					// NOTE(review): '&&' binds tighter than '||', so the variance
					// test only gates the t1i branch; confirm this matches the
					// intended pre-selection rule before "fixing" the parentheses.
					if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1 ) {
						distance2(d, i_sl, j_sl, k_sl, i_fl, j_fl, k_fl,
						          ni, nj, nk, i_sl+ii, j_sl+jj, k_sl+kk, ima_space);
						if(d[0] < distanciaminima)
							distanciaminima = d[0];   // minimum distance -> adaptive variance
					}
				}
			}
		}
	}
	if(distanciaminima == 0)
		distanciaminima = 1;      // guard against division by zero in the weights

	/* FILTERING PROCESS (center patch included this time: w = exp(0) = 1) */
	if(gcfg->rician == 0) {       // Gaussian noise model
		for(kk = -gcfg->searchsize; kk <= gcfg->searchsize; kk++) {
			nk = k_fl + kk;
			for(jj = -gcfg->searchsize; jj <= gcfg->searchsize; jj++) {
				nj = j_fl + jj;
				for(ii = -gcfg->searchsize; ii <= gcfg->searchsize; ii++) {
					ni = i_fl + ii;
					if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 &&
					   ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) {
						ima_tt       = ima_space[(k_sl+kk)*(gcfg->sharedSlice) + ((j_sl+jj)*gcfg->sharedwidth_x) + (i_sl+ii)];
						means_tt     = tex3D(means_tex,     ni, nj, nk);
						variances_tt = tex3D(variances_tex, ni, nj, nk);
						t1  = (means_t)/(means_tt);
						t1i = (gcfg->maxval-means_t)/(gcfg->maxval-means_tt);
						t2  = (variances_t)/(variances_tt);
						if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1 ) {
							distance(d, i_sl, j_sl, k_sl, i_sl+ii, j_sl+jj, k_sl+kk, ima_space);
							w = expf(-d[0]/distanciaminima);
							estimate    = estimate + w*ima_tt;
							totalweight = totalweight + w;
						}
					}
				}
			}
		}
		estimate = estimate/totalweight;
	} else {                      // Rician noise model: average second moments, bias-correct
		for(kk = -gcfg->searchsize; kk <= gcfg->searchsize; kk++) {
			nk = k_fl + kk;
			for(jj = -gcfg->searchsize; jj <= gcfg->searchsize; jj++) {
				nj = j_fl + jj;
				for(ii = -gcfg->searchsize; ii <= gcfg->searchsize; ii++) {
					ni = i_fl + ii;
					if(ni-gcfg->apron>=0 && nj-gcfg->apron>=0 && nk-gcfg->apron>=0 &&
					   ni-gcfg->apron<gcfg->dimx && nj-gcfg->apron<gcfg->dimy && nk-gcfg->apron<gcfg->dimz) {
						ima_tt       = ima_space[(k_sl+kk)*(gcfg->sharedSlice) + ((j_sl+jj)*gcfg->sharedwidth_x) + (i_sl+ii)];
						means_tt     = tex3D(means_tex,     ni, nj, nk);
						variances_tt = tex3D(variances_tex, ni, nj, nk);
						t1  = (means_t)/(means_tt);
						t1i = (gcfg->maxval-means_t)/(gcfg->maxval-means_tt);
						t2  = (variances_t)/(variances_tt);
						if( (t1>mu1 && t1<rmu1) || (t1i>mu1 && t1i<rmu1) && t2>var1 && t2<rvar1 ) {
							distance(d, i_sl, j_sl, k_sl, i_sl+ii, j_sl+jj, k_sl+kk, ima_space);
							w = expf(-d[0]/distanciaminima);
							estimate    = estimate + w*ima_tt*ima_tt;
							totalweight = totalweight + w;
						}
					}
				}
			}
		}
		estimate = estimate/totalweight;
		estimate = estimate - 2.0f*distanciaminima;      // remove Rician bias term
		estimate = (estimate > 0.0f) ? estimate : 0.0f;
		estimate = sqrtf(estimate);
	}
	Estimate[k*rc + (j*gcfg->dimx) + i] = estimate;
	__syncthreads();
}

/**
 * Pre-computation kernel: per-voxel box mean, residual (image - mean), and
 * unbiased variance over a (2s+1)^3 neighborhood of the apron-padded image.
 *
 * Launch layout: one 3D thread per voxel of the (dimfull_x,y,z) volume,
 * blockDim = (blockwidth)^3.  Dynamic shared memory must hold
 * (blockwidth + 2s)^3 floats.
 *
 * mean, R, var - pitched device buffers of extent dimfull_x x dimfull_y x
 *                dimfull_z; all three share the same pitch since they were
 *                allocated with identical extents (mean.pitch is reused).
 */
__global__ static void preProcess(cudaPitchedPtr mean, cudaPitchedPtr R, cudaPitchedPtr var,
                                  int dimfull_x, int dimfull_y, int dimfull_z, int s, int blockwidth)
{
	extern __shared__ float ima_shared[];
	int sharedwidthSlice, sharedwidth, threadIdx_x, threadIdx_y, threadIdx_z,
	    istart, jstart, kstart, icount, jcount, kcount,
	    i, j, k, ii, jj, kk, i_fl, j_fl, k_fl, i_sl, j_sl, k_sl, is, js, ks;
	int N = (2*s+1)*(2*s+1)*(2*s+1);    // voxels in the filter box

	sharedwidth = blockwidth + 2*s;
	sharedwidthSlice = sharedwidth*sharedwidth;
	threadIdx_x = threadIdx.x;
	threadIdx_y = threadIdx.y;
	threadIdx_z = threadIdx.z;
	i = blockIdx.x*blockwidth + threadIdx_x;   // coordinates in the un-padded (dimfull) volume
	j = blockIdx.y*blockwidth + threadIdx_y;
	k = blockIdx.z*blockwidth + threadIdx_z;
	i_fl = i + s;                              // coordinates in the s-padded source image
	j_fl = j + s;
	k_fl = k + s;
	i_sl = threadIdx_x + s;                    // coordinates in the shared tile
	j_sl = threadIdx_y + s;
	k_sl = threadIdx_z + s;

	// drop out-of-range threads
	if(i >= dimfull_x || j >= dimfull_y || k >= dimfull_z)
		return;

	/* general case: edge threads also load the s-wide apron */
	if(threadIdx_z==blockwidth-1 || k==dimfull_z-1){ kstart = 0;  kcount = s+1; }
	else if(threadIdx_z==0)                        { kstart = -s; kcount = s+1; }
	else                                           { kstart = 0;  kcount = 1; }
	if(threadIdx_y==blockwidth-1 || j==dimfull_y-1){ jstart = 0;  jcount = s+1; }
	else if(threadIdx_y==0)                        { jstart = -s; jcount = s+1; }
	else                                           { jstart = 0;  jcount = 1; }
	if(threadIdx_x==blockwidth-1 || i==dimfull_x-1){ istart = 0;  icount = s+1; }
	else if(threadIdx_x==0)                        { istart = -s; icount = s+1; }
	else                                           { istart = 0;  icount = 1; }

	/* special case: first thread that is also the last voxel loads both aprons */
	if(threadIdx_x==0 && i==dimfull_x-1){ istart = -s; icount = 2*s+1; }
	if(threadIdx_y==0 && j==dimfull_y-1){ jstart = -s; jcount = 2*s+1; }
	if(threadIdx_z==0 && k==dimfull_z-1){ kstart = -s; kcount = 2*s+1; }

	for(ks = 0; ks < kcount; ks++)
		for(js = 0; js < jcount; js++)
			for(is = 0; is < icount; is++)
				// stage the padded neighborhood into shared memory
				ima_shared[(k_sl+kstart+ks)*sharedwidthSlice + (j_sl+jstart+js)*sharedwidth + (i_sl+istart+is)]
					= tex3D(ima_tex, i_fl+istart+is, j_fl+jstart+js, k_fl+kstart+ks);
	__syncthreads();

	float Mt, Vt = 0;

	/* resolve pitched row pointers for mean and R (same pitch for all buffers) */
	char*  Ptr        = (char *) mean.ptr;
	size_t pitch      = mean.pitch;
	size_t slicePitch = pitch*dimfull_y;
	char*  slice      = Ptr + k*slicePitch;
	float* row        = (float *)(slice + j*pitch);
	char*  Ptr2       = (char *) R.ptr;
	char*  slice2     = Ptr2 + k*slicePitch;
	float* row2       = (float *)(slice2 + j*pitch);

	Mt = 0;
	for(kk = -s; kk <= s; kk++)
		for(jj = -s; jj <= s; jj++)
			for(ii = -s; ii <= s; ii++)
				Mt = Mt + ima_shared[(k_sl+kk)*sharedwidthSlice + (j_sl+jj)*sharedwidth + (i_sl+ii)];
	Mt = Mt/N;                                // box mean at (i,j,k)
	row[i]  = Mt;                             // store mean
	row2[i] = ima_shared[k_sl*sharedwidthSlice + j_sl*sharedwidth + i_sl] - Mt;  // store residual R = image - mean

	char*  Ptr3   = (char *) var.ptr;
	char*  slice3 = Ptr3 + k*slicePitch;
	float* row3   = (float *)(slice3 + j*pitch);
	float t = 0;
	for(kk = -s; kk <= s; kk++)
		for(jj = -s; jj <= s; jj++)
			for(ii = -s; ii <= s; ii++) {
				t  = ima_shared[(k_sl+kk)*sharedwidthSlice + (j_sl+jj)*sharedwidth + (i_sl+ii)] - Mt;
				Vt = Vt + t*t;
			}
	Vt = Vt/(N-1);                            // unbiased variance at (i,j,k)
	row3[i] = Vt;                             // store variance
	__syncthreads();
}

/**
 * Host driver for the single ANLM filter.
 *
 * ima_input  - host input image, padded by (f1+v+s) on every side
 * Estimate1  - host output image, dimx*dimy*dimz floats
 * f1         - patch radius for the filtering pass
 * v          - search radius
 * dimx/y/z   - un-padded image dimensions
 * MAX        - maximum value of the input image (used by pre-selection)
 * width2     - filtering-kernel block width
 * width      - pre-processing block width
 * s          - patch radius for the mean/R/variance pre-computation
 * gpuid      - CUDA device to run on
 * rician     - whether to apply the Rician bias correction
 *
 * Pipeline: bind input to a 3D texture -> preProcess kernel computes
 * mean/R/variance into pitched memory -> those are copied into 3D arrays and
 * bound to textures -> ANLMfilter produces the denoised volume.
 */
void runFilter_s(float * ima_input, float * Estimate1, int f1, int v,
                 int dimx, int dimy, int dimz, float MAX,
                 int width2, int width, int s, int gpuid, bool rician)
{
	int nDevices;
	CUDA_ASSERT(cudaGetDeviceCount(&nDevices));
	printf("====== Designed for single filtering ======\n");
	printf("Baseline+Opt2+Opt3\n");
	cudaDeviceProp prop;
	CUDA_ASSERT(cudaGetDeviceProperties(&prop, gpuid));
	printf("Device name: %s\n", prop.name);

	// BUGFIX: select the target device *before* any allocation / texture
	// binding (previously this happened only just before the filtering
	// kernel, so for gpuid != 0 the pre-processing ran on the wrong device).
	CUDA_ASSERT(cudaSetDevice(gpuid));

	//@@@@@@@@@@@@ Pre-processing kernel starts @@@@@@@@@@@@/
	int dimPrecom_x, dimPrecom_y, dimPrecom_z, dimfull_x, dimfull_y, dimfull_z,
	    widthPrecom, blockPrecom_x, blockPrecom_y, blockPrecom_z, sharedsizePre;
	cudaArray *ima=0, *meansArray=0, *variancesArray=0, *RArray=0;
	cudaEvent_t start, stop;
	float elapsedTime;

	widthPrecom = width;
	dimPrecom_x = dimx + 2*(f1+v+s);   // size of the input image (full padding)
	dimPrecom_y = dimy + 2*(f1+v+s);
	dimPrecom_z = dimz + 2*(f1+v+s);
	dimfull_x = dimx + 2*(f1+v);       // size of the mean, R and variance volumes
	dimfull_y = dimy + 2*(f1+v);
	dimfull_z = dimz + 2*(f1+v);
	printf("dim_x=%d\tdim_y=%d\tdim_z=%d\n", dimx, dimy, dimz);

	cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
	cudaExtent imaSize = make_cudaExtent(dimPrecom_x, dimPrecom_y, dimPrecom_z);

	// upload the padded input image into a 3D cudaArray and bind it to ima_tex
	CUDA_ASSERT(cudaMalloc3DArray(&ima, &channelDesc, imaSize));
	cudaMemcpy3DParms copyParams1 = {0};
	copyParams1.srcPtr   = make_cudaPitchedPtr((void*)ima_input,
	                                           imaSize.width*sizeof(float),
	                                           imaSize.width, imaSize.height);
	copyParams1.dstArray = ima;
	copyParams1.extent   = imaSize;            // transfer extent in elements
	copyParams1.kind     = cudaMemcpyHostToDevice;
	CUDA_ASSERT(cudaMemcpy3D(&copyParams1));
	ima_tex.normalized = false;
	ima_tex.filterMode = cudaFilterModePoint;
	ima_tex.addressMode[0] = cudaAddressModeClamp;
	ima_tex.addressMode[1] = cudaAddressModeClamp;
	ima_tex.addressMode[2] = cudaAddressModeClamp;
	CUDA_ASSERT(cudaBindTextureToArray(ima_tex, ima, channelDesc));

	// pitched global memory for mean, R and variance
	cudaExtent imaApron = make_cudaExtent(dimfull_x*sizeof(float), dimfull_y, dimfull_z);
	cudaPitchedPtr mean, R, var;
	CUDA_ASSERT(cudaMalloc3D(&mean, imaApron));
	CUDA_ASSERT(cudaMalloc3D(&R,    imaApron));
	CUDA_ASSERT(cudaMalloc3D(&var,  imaApron));

	sharedsizePre = (widthPrecom+2*s)*(widthPrecom+2*s)*(widthPrecom+2*s);
	blockPrecom_x = (dimfull_x+widthPrecom-1)/widthPrecom;
	blockPrecom_y = (dimfull_y+widthPrecom-1)/widthPrecom;
	blockPrecom_z = (dimfull_z+widthPrecom-1)/widthPrecom;
	printf("shared size for pre-computation=%fkB\n", float(sharedsizePre*sizeof(float))/1024);
	if(sharedsizePre*sizeof(float) > 48*1024){
		printf("The memory requirement for pre-computation is larger than the size of shared memory!");
		exit(1);
	}

	//@@@@@@@@@@ Pre-processing kernel time start @@@@@@@@@@/
	CUDA_ASSERT(cudaEventCreate(&start));
	CUDA_ASSERT(cudaEventRecord(start, 0));
	dim3 dimBlockPre(widthPrecom, widthPrecom, widthPrecom);
	dim3 dimGridPre(blockPrecom_x, blockPrecom_y, blockPrecom_z);
	preProcess<<<dimGridPre, dimBlockPre, sharedsizePre*sizeof(float)>>>(mean, R, var,
	                 dimfull_x, dimfull_y, dimfull_z, s, widthPrecom);
	CUDA_ASSERT(cudaGetLastError());           // catch launch-configuration errors
	CUDA_ASSERT(cudaDeviceSynchronize());      // wait for mean/R/variance to finish
	CUDA_ASSERT(cudaEventCreate(&stop));
	CUDA_ASSERT(cudaEventRecord(stop, 0));
	CUDA_ASSERT(cudaEventSynchronize(stop));
	CUDA_ASSERT(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("Pre-computation kernel time: %f ms\n", elapsedTime);
	//@@@@@@@@@@ Pre-processing kernel time end @@@@@@@@@@/

	//@@@@@@@@@@@@ Binding pre-computed mean, R and variance to texture memory start @@@@@@@@@@@@/
	CUDA_ASSERT(cudaEventCreate(&start));
	CUDA_ASSERT(cudaEventRecord(start, 0));
	cudaExtent Size = make_cudaExtent(dimfull_x, dimfull_y, dimfull_z);

	// mean
	CUDA_ASSERT(cudaMalloc3DArray(&meansArray, &channelDesc, Size));
	cudaMemcpy3DParms copyParams2 = {0};
	copyParams2.srcPtr   = mean;
	copyParams2.dstArray = meansArray;
	copyParams2.extent   = Size;
	copyParams2.kind     = cudaMemcpyDeviceToDevice;
	CUDA_ASSERT(cudaMemcpy3D(&copyParams2));
	means_tex.normalized = false;
	means_tex.filterMode = cudaFilterModePoint;
	means_tex.addressMode[0] = cudaAddressModeClamp;
	means_tex.addressMode[1] = cudaAddressModeClamp;
	means_tex.addressMode[2] = cudaAddressModeClamp;
	CUDA_ASSERT(cudaBindTextureToArray(means_tex, meansArray, channelDesc));

	// R (residual)
	CUDA_ASSERT(cudaMalloc3DArray(&RArray, &channelDesc, Size));
	cudaMemcpy3DParms copyParams3 = {0};
	copyParams3.srcPtr   = R;
	copyParams3.dstArray = RArray;
	copyParams3.extent   = Size;
	copyParams3.kind     = cudaMemcpyDeviceToDevice;
	CUDA_ASSERT(cudaMemcpy3D(&copyParams3));
	R_tex.normalized = false;
	R_tex.filterMode = cudaFilterModePoint;
	R_tex.addressMode[0] = cudaAddressModeClamp;
	R_tex.addressMode[1] = cudaAddressModeClamp;
	R_tex.addressMode[2] = cudaAddressModeClamp;
	CUDA_ASSERT(cudaBindTextureToArray(R_tex, RArray, channelDesc));

	// variance
	CUDA_ASSERT(cudaMalloc3DArray(&variancesArray, &channelDesc, Size));
	cudaMemcpy3DParms copyParams4 = {0};
	copyParams4.srcPtr   = var;
	copyParams4.dstArray = variancesArray;
	copyParams4.extent   = Size;
	copyParams4.kind     = cudaMemcpyDeviceToDevice;
	CUDA_ASSERT(cudaMemcpy3D(&copyParams4));
	variances_tex.normalized = false;
	variances_tex.filterMode = cudaFilterModePoint;
	variances_tex.addressMode[0] = cudaAddressModeClamp;
	variances_tex.addressMode[1] = cudaAddressModeClamp;
	variances_tex.addressMode[2] = cudaAddressModeClamp;
	CUDA_ASSERT(cudaBindTextureToArray(variances_tex, variancesArray, channelDesc));

	CUDA_ASSERT(cudaEventCreate(&stop));
	CUDA_ASSERT(cudaEventRecord(stop, 0));
	CUDA_ASSERT(cudaEventSynchronize(stop));
	CUDA_ASSERT(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("Elapsed time of texture binding: %f ms\n", elapsedTime);
	//@@@@@@@@@@@@ Binding pre-computed mean, R and variance to texture memory end @@@@@@@@@@@@/

	//@@@@@@@@@@@@ Filtering process starts @@@@@@@@@@@@/
	FilterParam param={0,v,0.f,dimx,dimy,dimz,0,0,0,MAX,width,0,0,0,0,0,0,rician};
	float *EstimateKernel;
	int Ndim, sharedsize;
	Ndim = dimx*dimy*dimz;                    // total voxels of the output image
	const int size = Ndim*sizeof(float);
	CUDA_ASSERT(cudaMalloc((void**)&EstimateKernel, size));

	//&&&&&&&&&& Parameter setup for the 1st filtering (large) &&&&&&&&&&/
	width = width2;
	sharedsize = (width+2*(f1+v))*(width+2*(f1+v))*(width+2*(f1+v));   // shared floats per block
	printf("Shared size for filtering=%fkB\n", float(sharedsize*sizeof(float))/1024);
	printf("width=%d\n", width);
	if(width*width*width > 1024){
		printf("Error: The number of threads in a block is larger than 1024!");
		exit(1);
	}
	param.patchsize = f1;
	param.rpatchnomalize = 1.f/((f1<<1)+1);
	param.rpatchnomalize = param.rpatchnomalize*param.rpatchnomalize*param.rpatchnomalize;
	param.blockdimx = (dimx+width-1)/width;
	param.blockdimy = (dimy+width-1)/width;
	param.blockdimz = (dimz+width-1)/width;   // BUGFIX: was (dimy+width-1)/width, truncating non-cubic volumes along z
	param.blockwidth = width;
	param.sharedwidth_x = width + 2*(f1+v);
	param.sharedwidth   = width + 2*(f1+v);   // same tile width along the other dimension
	param.sharedSlice = param.sharedwidth_x*param.sharedwidth;
	param.apron = f1+v;
	param.apronFull = f1+v+s;
	param.apronShared = f1+v;
	CUDA_ASSERT(cudaMemcpyToSymbol(gcfg, &param, sizeof(FilterParam), 0, cudaMemcpyHostToDevice));
	printf("1st: searchsize=%d\n", param.searchsize);
	printf("1st: patchsize=%d\n", param.patchsize);

	//@@@@@@@@@@ 1st filtering (large): Time for filtering kernel start @@@@@@@@@@/
	CUDA_ASSERT(cudaEventCreate(&start));
	CUDA_ASSERT(cudaEventRecord(start, 0));
	dim3 dimBlock(width, width, width);
	dim3 dimGrid(param.blockdimx, param.blockdimy, param.blockdimz);
	ANLMfilter<<<dimGrid, dimBlock, sharedsize*sizeof(float)>>>(EstimateKernel);
	CUDA_ASSERT(cudaGetLastError());          // catch launch-configuration errors
	CUDA_ASSERT(cudaDeviceSynchronize());
	CUDA_ASSERT(cudaEventCreate(&stop));
	CUDA_ASSERT(cudaEventRecord(stop, 0));
	CUDA_ASSERT(cudaEventSynchronize(stop));
	CUDA_ASSERT(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("1st filtering (large) kernel time: %f ms\n\n\n", elapsedTime);
	//@@@@@@@@@@ 1st filtering (large): Time for filtering kernel end @@@@@@@@@@/

	//@@@@@@@@@@ Copy back result and free all device resources @@@@@@@@@@/
	CUDA_ASSERT(cudaMemcpy(Estimate1, EstimateKernel, size, cudaMemcpyDeviceToHost));
	CUDA_ASSERT(cudaFree(EstimateKernel));
	CUDA_ASSERT(cudaFreeArray(meansArray));
	CUDA_ASSERT(cudaFreeArray(variancesArray));
	CUDA_ASSERT(cudaFreeArray(RArray));
	CUDA_ASSERT(cudaFreeArray(ima));
	CUDA_ASSERT(cudaFree(mean.ptr));
	CUDA_ASSERT(cudaFree(R.ptr));
	CUDA_ASSERT(cudaFree(var.ptr));
	CUDA_ASSERT(cudaDeviceReset());
}
bb4352532d582261929bdb9d9eb92d932f8b9bac.hip
// !!! This is a file automatically generated by hipify!!! /* * flux_ML_iface.c * * Created on: Nov 25, 2015 * Author: erik */ #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" #include "mpi.h" #include "hip/hip_runtime.h" #include "roctracer/roctx.h" #include "cudaCommon.h" #include "cudaFluidStep.h" #include "cudaFreeRadiation.h" #include "cudaSoundspeed.h" #include "cflTimestep.h" #include "sourceStep.h" #include "flux.h" // Only uncomment this if you plan to debug this file. // This will cause it to require output arguments to return data in, // and perturb code behavior by generating writes to the output debug arrays //#define DEBUGMODE FluidMethods mlmethodToEnum(int mlmethod); int fetchMinDensity(mxArray *mxFluids, int fluidNum, double *rhoMin); int calculateMaxTimestep(GridFluid *fluids, int nFluids, FluidStepParams *fsp, ParallelTopology *topo, MGArray *tempStorage, double *timestep); int performCompleteTimestep(GridFluid *fluids, int numFluids, FluidStepParams fsp, ParallelTopology topo, GravityData *gravdata, ParametricRadiation *rad, int srcType); #ifdef DEBUGMODE #include "debug_inserts.h" #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int wanted_nlhs = 0; #ifdef DEBUGMODE wanted_nlhs = 1; #endif if ((nrhs!= 7) || (nlhs != wanted_nlhs)) mexErrMsgTxt("Wrong number of arguments: need sourceStep(run, fluid, bx, by, bz, xyvector, [order, step #, step method, tFraction])\n"); //MGArray fluid[5]; #ifdef USE_NVTX roctxRangePush("flux_multi step"); #endif double *scalars = mxGetPr(prhs[6]); if(mxGetNumberOfElements(prhs[6]) != 4) { DROP_MEX_ERROR("Must rx 4 parameters in params vector: [ order, step #, step method, tFraction]"); } int worked = SUCCESSFUL; const mxArray* theImogenManager = prhs[0]; double dt = derefXdotAdotB_scalar(theImogenManager, "time", "dTime"); int ishydro = (int)derefXdotAdotB_scalar(theImogenManager, "pureHydro", 
(const char *)NULL); // Load up the FluidStepParameters structure int sweepDirect = (int)scalars[0]; /* Identify if forwards (sweepDirect = 1) or backwards (-1) */ int stepNum = (int)scalars[1]; /* step number (used to pick the permutation of the fluid propagators) */ int stepMethod = (int)scalars[2]; /* 1=HLL, 2=HLLC, 3=Xin/Jin */ FluidStepParams fsp; fsp.dt = dt; fsp.onlyHydro = ishydro; fsp.stepDirection = sweepDirect; fsp.stepMethod = mlmethodToEnum(stepMethod); fsp.stepNumber = stepNum; fsp.cflPrefactor = derefXdotAdotB_scalar(theImogenManager, "time","CFL"); // Load up the radiation structure ParametricRadiation prad; int isRadiating = derefXdotAdotB_scalar(theImogenManager, "radiation", "type"); if(isRadiating) { prad.exponent = derefXdotAdotB_scalar(theImogenManager, "radiation", "exponent"); // FIXME NASTY HACK min temperature set by hard code (this copied from ./fluid/Radiation.m:124) prad.minTemperature = 1.05; prad.prefactor = derefXdotAdotB_scalar(theImogenManager, "radiation", "strength") * dt; } else { prad.prefactor = 0; } const mxArray *geo = mxGetProperty(prhs[0], 0, "geometry"); // Load up the topology structure ParallelTopology topo; const mxArray *mxtopo = mxGetProperty(geo, 0, "topology"); topoStructureToC(mxtopo, &topo); // Load up the geometry structure inside the FluidStepParams fsp.geometry = accessMatlabGeometryClass(geo); int numFluids = mxGetNumberOfElements(prhs[1]); if(numFluids > 1) { fsp.multifluidDragMethod = (int)derefXdotAdotB_scalar(theImogenManager, "multifluidDragMethod", (const char *)NULL); } else { fsp.multifluidDragMethod = 0; } // Access the potential field, if relevant GravityData gravdat; MGArray gravphi; int haveg = derefXdotAdotB_scalar(theImogenManager, "potentialField", "ACTIVE"); if(haveg) { const mxArray *gravfield; gravfield = derefXdotAdotB(theImogenManager, "potentialField", "field"); worked = MGA_accessMatlabArrays(&gravfield, 0, 0, &gravphi); gravdat.phi = &gravphi; double orderstmp[2]; 
derefXdotAdotB_vector(theImogenManager, "compositeSrcOrders", (const char *)NULL, &orderstmp[0], 2); gravdat.spaceOrder = (int)orderstmp[0]; gravdat.timeOrder = (int)orderstmp[1]; } else { gravdat.spaceOrder = 0; gravdat.timeOrder = 0; } if(worked != SUCCESSFUL) { DROP_MEX_ERROR("performCompleteTimestep crashing because of failure to fetch run.potentialField.field"); } // Access the fluids themselves GridFluid fluids[numFluids]; int fluidct; for(fluidct = 0; fluidct < numFluids; fluidct++) { MGA_accessFluidCanister(prhs[1], fluidct, &fluids[fluidct].data[0]); fluids[fluidct].thermo = accessMatlabThermoDetails(mxGetProperty(prhs[1], fluidct, "thermoDetails")); worked = fetchMinDensity((mxArray *)prhs[1], fluidct, &fluids[fluidct].rhoMin); } // Fetch the XY vectors MGArray XYvectors; worked = MGA_accessMatlabArrays(&prhs[5], 0, 0, &XYvectors); if(worked != SUCCESSFUL) { DROP_MEX_ERROR("performCompleteTimestep crashing because of failure to fetch input xyVectors (arg 6)"); } fsp.geometry.XYVector = &XYvectors; // Get the global domain rez for doing cfl double *globrez = mxGetPr(derefXdotAdotB(theImogenManager, "geometry", "globalDomainRez")); int i; for(i = 0; i < 3; i++) { fsp.geometry.globalRez[i] = globrez[i]; } // Determine what kind of source type we're going to do int cylcoords = (fsp.geometry.shape == CYLINDRICAL) || (fsp.geometry.shape == RZCYLINDRICAL); // sourcerFunction = (fsp.geometry. useCyl + 2*useRF + 4*usePhi + 8*use2F; int srcType = 1*(cylcoords == 1) + 2*(fsp.geometry.frameOmega != 0) + 4*(haveg) + 8*(numFluids > 1); double resultingTimestep; worked = CHECK_IMOGEN_ERROR(performCompleteTimestep(fluids, numFluids, fsp, topo, &gravdat, &prad, srcType)); if(worked != SUCCESSFUL) { DROP_MEX_ERROR("Big problem: performCompleteTimestep crashed! 
See compiled backtrace generated above."); } } int performCompleteTimestep(GridFluid *fluids, int numFluids, FluidStepParams fsp, ParallelTopology topo, GravityData *gravdata, ParametricRadiation *rad, int srcType) { int status = SUCCESSFUL; int fluidct; MGArray tempStorage; tempStorage.nGPUs = -1; // not allocated int numarrays; #ifdef DEBUGMODE numarrays = 6 + DBG_NUMARRAYS; #else #ifdef USE_RK3 numarrays = 11; #else numarrays = 6; #endif #endif if(tempStorage.nGPUs == -1) { roctxMark("flux_multi.cu:131 large malloc 6 arrays"); status = MGA_allocSlab(&fluids[0].data[0], &tempStorage, numarrays); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } } //double propdt; //status = calculateMaxTimestep(fluids, numFluids, &fsp, &topo, &tempStorage, &propdt); //printf("Input dt = %le, proposed dt = %le, diff = %le\n", fsp.dt, propdt, propdt - fsp.dt); // TAKE SOURCE HALF STEP fsp.dt *= 0.5; status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); fsp.dt *= 2; //rad->prefactor *= .5; //status = sourcefunction_OpticallyThinPowerLawRadiation(&fluidReorder[0], NULL, fsp.onlyHydro, fluids[0].thermo.gamma, rad); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } fsp.stepDirection = 1; // INITIATE FORWARD-ORDERED TRANSPORT STEP for(fluidct = 0; fluidct < numFluids; fluidct++) { fsp.minimumRho = fluids[fluidct].rhoMin; fsp.thermoGamma = fluids[fluidct].thermo.gamma; fsp.Cisothermal = fluids[fluidct].thermo.Cisothermal; if(fluids[fluidct].thermo.Cisothermal != -1) { fsp.thermoGamma = 2; // This makes the hydro pressure solver return internal energy when it multiplies eint by (gamma-1) } status = performFluidUpdate_3D(&fluids[fluidct].data[0], &topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } if(status != SUCCESSFUL) { return status; } // TAKE FULL SOURCE STEP status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); 
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } // INITIATE BACKWARD-ORDERED TRANSPORT STEP fsp.stepDirection = -1; for(fluidct = 0; fluidct < numFluids; fluidct++) { fsp.minimumRho = fluids[fluidct].rhoMin; fsp.thermoGamma = fluids[fluidct].thermo.gamma; fsp.Cisothermal = fluids[fluidct].thermo.Cisothermal; if(fluids[fluidct].thermo.Cisothermal != -1) { fsp.thermoGamma = 2; } status = performFluidUpdate_3D(&fluids[fluidct].data[0], &topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } if(status != SUCCESSFUL) { return status; } // TAKE FINAL SOURCE HALF STEP fsp.dt *= 0.5; status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); fsp.dt *= 2; if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } // CLEANUP // This was allocated & re-used many times in performFluidUpdate_3D if((tempStorage.nGPUs != -1) && (status == SUCCESSFUL)) { #ifdef USE_NVTX roctxMark("Large free flux_ML_iface.cu:144"); #endif status = CHECK_IMOGEN_ERROR(MGA_delete(&tempStorage)); if(status != SUCCESSFUL) { return status; } } #ifdef SYNCMEX MGA_sledgehammerSequentialize(&fluid[0]); #endif #ifdef USE_NVTX roctxRangePop(); #endif return SUCCESSFUL; } /* Computes the maximum permitted timestep allowed by the CFL constraint on the fluid method */ int calculateMaxTimestep(GridFluid *fluids, int nFluids, FluidStepParams *fsp, ParallelTopology *topo, MGArray *tempStorage, double *timestep) { int status = SUCCESSFUL; double dt = 1e38; double currentdt = dt; double tau; int i, j; // compute each fluid's min timestep on this node for(i = 0; i < nFluids; i++) { status = CHECK_IMOGEN_ERROR(calculateSoundspeed(&fluids[i].data[0], (MGArray *)NULL, tempStorage, fluids[i].thermo.gamma)); double globrez[3]; for(j = 0; j < 3; j++) { globrez[j] = fsp->geometry.globalRez[j]; } status = computeLocalCFLTimestep(&fluids[i].data[0], tempStorage, &fsp->geometry, fsp->stepMethod, &globrez[0], &tau); 
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { break; } // crash on invalid timesteps if(isnan(tau)) { PRINT_SIMPLE_FAULT("Calculated a timestep that is either infinity or NaN. Crashing\n!"); return ERROR_CRASH; } // This is computed globally by computeLocalCFLTimestep currentdt = (currentdt < tau) ? currentdt : tau; } *timestep = currentdt * fsp->cflPrefactor; return SUCCESSFUL; } int fetchMinDensity(mxArray *mxFluids, int fluidNum, double *rhoMin) { int status = SUCCESSFUL; mxArray *flprop = mxGetProperty(mxFluids, fluidNum, "MINMASS"); if(flprop != NULL) { rhoMin[0] = *((double *)mxGetPr(flprop)); } else { PRINT_FAULT_HEADER; printf("Unable to access fluid(%i).MINMASS property.\n", fluidNum); PRINT_FAULT_FOOTER; status = ERROR_NULL_POINTER; } return status; } FluidMethods mlmethodToEnum(int mlmethod) { FluidMethods f; switch(mlmethod) { case 1: f = METHOD_HLL; break; case 2: f = METHOD_HLLC; break; case 3: f = METHOD_XINJIN; break; } return f; }
bb4352532d582261929bdb9d9eb92d932f8b9bac.cu
/* * flux_ML_iface.c * * Created on: Nov 25, 2015 * Author: erik */ #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" #include "mpi.h" #include "cuda.h" #include "nvToolsExt.h" #include "cudaCommon.h" #include "cudaFluidStep.h" #include "cudaFreeRadiation.h" #include "cudaSoundspeed.h" #include "cflTimestep.h" #include "sourceStep.h" #include "flux.h" // Only uncomment this if you plan to debug this file. // This will cause it to require output arguments to return data in, // and perturb code behavior by generating writes to the output debug arrays //#define DEBUGMODE FluidMethods mlmethodToEnum(int mlmethod); int fetchMinDensity(mxArray *mxFluids, int fluidNum, double *rhoMin); int calculateMaxTimestep(GridFluid *fluids, int nFluids, FluidStepParams *fsp, ParallelTopology *topo, MGArray *tempStorage, double *timestep); int performCompleteTimestep(GridFluid *fluids, int numFluids, FluidStepParams fsp, ParallelTopology topo, GravityData *gravdata, ParametricRadiation *rad, int srcType); #ifdef DEBUGMODE #include "debug_inserts.h" #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int wanted_nlhs = 0; #ifdef DEBUGMODE wanted_nlhs = 1; #endif if ((nrhs!= 7) || (nlhs != wanted_nlhs)) mexErrMsgTxt("Wrong number of arguments: need sourceStep(run, fluid, bx, by, bz, xyvector, [order, step #, step method, tFraction])\n"); //MGArray fluid[5]; #ifdef USE_NVTX nvtxRangePush("flux_multi step"); #endif double *scalars = mxGetPr(prhs[6]); if(mxGetNumberOfElements(prhs[6]) != 4) { DROP_MEX_ERROR("Must rx 4 parameters in params vector: [ order, step #, step method, tFraction]"); } int worked = SUCCESSFUL; const mxArray* theImogenManager = prhs[0]; double dt = derefXdotAdotB_scalar(theImogenManager, "time", "dTime"); int ishydro = (int)derefXdotAdotB_scalar(theImogenManager, "pureHydro", (const char *)NULL); // Load up the FluidStepParameters structure int 
sweepDirect = (int)scalars[0]; /* Identify if forwards (sweepDirect = 1) or backwards (-1) */ int stepNum = (int)scalars[1]; /* step number (used to pick the permutation of the fluid propagators) */ int stepMethod = (int)scalars[2]; /* 1=HLL, 2=HLLC, 3=Xin/Jin */ FluidStepParams fsp; fsp.dt = dt; fsp.onlyHydro = ishydro; fsp.stepDirection = sweepDirect; fsp.stepMethod = mlmethodToEnum(stepMethod); fsp.stepNumber = stepNum; fsp.cflPrefactor = derefXdotAdotB_scalar(theImogenManager, "time","CFL"); // Load up the radiation structure ParametricRadiation prad; int isRadiating = derefXdotAdotB_scalar(theImogenManager, "radiation", "type"); if(isRadiating) { prad.exponent = derefXdotAdotB_scalar(theImogenManager, "radiation", "exponent"); // FIXME NASTY HACK min temperature set by hard code (this copied from ./fluid/Radiation.m:124) prad.minTemperature = 1.05; prad.prefactor = derefXdotAdotB_scalar(theImogenManager, "radiation", "strength") * dt; } else { prad.prefactor = 0; } const mxArray *geo = mxGetProperty(prhs[0], 0, "geometry"); // Load up the topology structure ParallelTopology topo; const mxArray *mxtopo = mxGetProperty(geo, 0, "topology"); topoStructureToC(mxtopo, &topo); // Load up the geometry structure inside the FluidStepParams fsp.geometry = accessMatlabGeometryClass(geo); int numFluids = mxGetNumberOfElements(prhs[1]); if(numFluids > 1) { fsp.multifluidDragMethod = (int)derefXdotAdotB_scalar(theImogenManager, "multifluidDragMethod", (const char *)NULL); } else { fsp.multifluidDragMethod = 0; } // Access the potential field, if relevant GravityData gravdat; MGArray gravphi; int haveg = derefXdotAdotB_scalar(theImogenManager, "potentialField", "ACTIVE"); if(haveg) { const mxArray *gravfield; gravfield = derefXdotAdotB(theImogenManager, "potentialField", "field"); worked = MGA_accessMatlabArrays(&gravfield, 0, 0, &gravphi); gravdat.phi = &gravphi; double orderstmp[2]; derefXdotAdotB_vector(theImogenManager, "compositeSrcOrders", (const char *)NULL, 
&orderstmp[0], 2); gravdat.spaceOrder = (int)orderstmp[0]; gravdat.timeOrder = (int)orderstmp[1]; } else { gravdat.spaceOrder = 0; gravdat.timeOrder = 0; } if(worked != SUCCESSFUL) { DROP_MEX_ERROR("performCompleteTimestep crashing because of failure to fetch run.potentialField.field"); } // Access the fluids themselves GridFluid fluids[numFluids]; int fluidct; for(fluidct = 0; fluidct < numFluids; fluidct++) { MGA_accessFluidCanister(prhs[1], fluidct, &fluids[fluidct].data[0]); fluids[fluidct].thermo = accessMatlabThermoDetails(mxGetProperty(prhs[1], fluidct, "thermoDetails")); worked = fetchMinDensity((mxArray *)prhs[1], fluidct, &fluids[fluidct].rhoMin); } // Fetch the XY vectors MGArray XYvectors; worked = MGA_accessMatlabArrays(&prhs[5], 0, 0, &XYvectors); if(worked != SUCCESSFUL) { DROP_MEX_ERROR("performCompleteTimestep crashing because of failure to fetch input xyVectors (arg 6)"); } fsp.geometry.XYVector = &XYvectors; // Get the global domain rez for doing cfl double *globrez = mxGetPr(derefXdotAdotB(theImogenManager, "geometry", "globalDomainRez")); int i; for(i = 0; i < 3; i++) { fsp.geometry.globalRez[i] = globrez[i]; } // Determine what kind of source type we're going to do int cylcoords = (fsp.geometry.shape == CYLINDRICAL) || (fsp.geometry.shape == RZCYLINDRICAL); // sourcerFunction = (fsp.geometry. useCyl + 2*useRF + 4*usePhi + 8*use2F; int srcType = 1*(cylcoords == 1) + 2*(fsp.geometry.frameOmega != 0) + 4*(haveg) + 8*(numFluids > 1); double resultingTimestep; worked = CHECK_IMOGEN_ERROR(performCompleteTimestep(fluids, numFluids, fsp, topo, &gravdat, &prad, srcType)); if(worked != SUCCESSFUL) { DROP_MEX_ERROR("Big problem: performCompleteTimestep crashed! 
See compiled backtrace generated above."); } } int performCompleteTimestep(GridFluid *fluids, int numFluids, FluidStepParams fsp, ParallelTopology topo, GravityData *gravdata, ParametricRadiation *rad, int srcType) { int status = SUCCESSFUL; int fluidct; MGArray tempStorage; tempStorage.nGPUs = -1; // not allocated int numarrays; #ifdef DEBUGMODE numarrays = 6 + DBG_NUMARRAYS; #else #ifdef USE_RK3 numarrays = 11; #else numarrays = 6; #endif #endif if(tempStorage.nGPUs == -1) { nvtxMark("flux_multi.cu:131 large malloc 6 arrays"); status = MGA_allocSlab(&fluids[0].data[0], &tempStorage, numarrays); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } } //double propdt; //status = calculateMaxTimestep(fluids, numFluids, &fsp, &topo, &tempStorage, &propdt); //printf("Input dt = %le, proposed dt = %le, diff = %le\n", fsp.dt, propdt, propdt - fsp.dt); // TAKE SOURCE HALF STEP fsp.dt *= 0.5; status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); fsp.dt *= 2; //rad->prefactor *= .5; //status = sourcefunction_OpticallyThinPowerLawRadiation(&fluidReorder[0], NULL, fsp.onlyHydro, fluids[0].thermo.gamma, rad); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } fsp.stepDirection = 1; // INITIATE FORWARD-ORDERED TRANSPORT STEP for(fluidct = 0; fluidct < numFluids; fluidct++) { fsp.minimumRho = fluids[fluidct].rhoMin; fsp.thermoGamma = fluids[fluidct].thermo.gamma; fsp.Cisothermal = fluids[fluidct].thermo.Cisothermal; if(fluids[fluidct].thermo.Cisothermal != -1) { fsp.thermoGamma = 2; // This makes the hydro pressure solver return internal energy when it multiplies eint by (gamma-1) } status = performFluidUpdate_3D(&fluids[fluidct].data[0], &topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } if(status != SUCCESSFUL) { return status; } // TAKE FULL SOURCE STEP status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); 
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } // INITIATE BACKWARD-ORDERED TRANSPORT STEP fsp.stepDirection = -1; for(fluidct = 0; fluidct < numFluids; fluidct++) { fsp.minimumRho = fluids[fluidct].rhoMin; fsp.thermoGamma = fluids[fluidct].thermo.gamma; fsp.Cisothermal = fluids[fluidct].thermo.Cisothermal; if(fluids[fluidct].thermo.Cisothermal != -1) { fsp.thermoGamma = 2; } status = performFluidUpdate_3D(&fluids[fluidct].data[0], &topo, fsp, &tempStorage); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) break; } if(status != SUCCESSFUL) { return status; } // TAKE FINAL SOURCE HALF STEP fsp.dt *= 0.5; status = performSourceFunctions(srcType, fluids, numFluids, fsp, &topo, gravdata, rad, &tempStorage); fsp.dt *= 2; if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { return status; } // CLEANUP // This was allocated & re-used many times in performFluidUpdate_3D if((tempStorage.nGPUs != -1) && (status == SUCCESSFUL)) { #ifdef USE_NVTX nvtxMark("Large free flux_ML_iface.cu:144"); #endif status = CHECK_IMOGEN_ERROR(MGA_delete(&tempStorage)); if(status != SUCCESSFUL) { return status; } } #ifdef SYNCMEX MGA_sledgehammerSequentialize(&fluid[0]); #endif #ifdef USE_NVTX nvtxRangePop(); #endif return SUCCESSFUL; } /* Computes the maximum permitted timestep allowed by the CFL constraint on the fluid method */ int calculateMaxTimestep(GridFluid *fluids, int nFluids, FluidStepParams *fsp, ParallelTopology *topo, MGArray *tempStorage, double *timestep) { int status = SUCCESSFUL; double dt = 1e38; double currentdt = dt; double tau; int i, j; // compute each fluid's min timestep on this node for(i = 0; i < nFluids; i++) { status = CHECK_IMOGEN_ERROR(calculateSoundspeed(&fluids[i].data[0], (MGArray *)NULL, tempStorage, fluids[i].thermo.gamma)); double globrez[3]; for(j = 0; j < 3; j++) { globrez[j] = fsp->geometry.globalRez[j]; } status = computeLocalCFLTimestep(&fluids[i].data[0], tempStorage, &fsp->geometry, fsp->stepMethod, &globrez[0], &tau); 
if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { break; } // crash on invalid timesteps if(isnan(tau)) { PRINT_SIMPLE_FAULT("Calculated a timestep that is either infinity or NaN. Crashing\n!"); return ERROR_CRASH; } // This is computed globally by computeLocalCFLTimestep currentdt = (currentdt < tau) ? currentdt : tau; } *timestep = currentdt * fsp->cflPrefactor; return SUCCESSFUL; } int fetchMinDensity(mxArray *mxFluids, int fluidNum, double *rhoMin) { int status = SUCCESSFUL; mxArray *flprop = mxGetProperty(mxFluids, fluidNum, "MINMASS"); if(flprop != NULL) { rhoMin[0] = *((double *)mxGetPr(flprop)); } else { PRINT_FAULT_HEADER; printf("Unable to access fluid(%i).MINMASS property.\n", fluidNum); PRINT_FAULT_FOOTER; status = ERROR_NULL_POINTER; } return status; } FluidMethods mlmethodToEnum(int mlmethod) { FluidMethods f; switch(mlmethod) { case 1: f = METHOD_HLL; break; case 2: f = METHOD_HLLC; break; case 3: f = METHOD_XINJIN; break; } return f; }
923ae9079e432adcd6ec9a400b18689caab981a5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cuConvertC4ToC3Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float4 *src = NULL; hipMalloc(&src, XSIZE*YSIZE); size_t src_stride = 2; float3 *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); size_t dst_stride = 2; int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cuConvertC4ToC3Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cuConvertC4ToC3Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cuConvertC4ToC3Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,src_stride,dst,dst_stride,width,height); } auto end 
= steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
923ae9079e432adcd6ec9a400b18689caab981a5.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cuConvertC4ToC3Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float4 *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); size_t src_stride = 2; float3 *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); size_t dst_stride = 2; int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cuConvertC4ToC3Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cuConvertC4ToC3Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cuConvertC4ToC3Kernel<<<gridBlock,threadBlock>>>(src,src_stride,dst,dst_stride,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bae6a791ae2b9f1ffcf7f67944c4c306c6407ee5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "partitioner.cpp" #include "irregular_runtime.h" #include "../lib/cu_util.h" #include "partition_cuda.h" #include "partition_cpu.h" #include "cpu_args.h" #include "parameters.h" #include "gpu_kernel.hip" #include <mpi.h> #include "../lib/macro.h" #include "cpu_kernel.h" #include <stdio.h> #include "reorder.cpp" #include "../lib/time_util.h" #include <math.h> IrregularRuntime::IrregularRuntime(IRIndex num_edges, IRIndex num_nodes, EDGE *edges, void *edge_data, void *node_data, int edge_data_elm_size, int node_data_elm_size, int reduction_elm_size, int node_num_dims, void *node_coordinates, int coordinate_size, void *parameter, int parameter_size, int num_procs, int num_iters): global_num_edges_(num_edges), global_num_nodes_(num_nodes), global_edges_(edges), global_edge_data_(edge_data), global_node_data_(node_data), edge_data_elm_size_(edge_data_elm_size), node_data_elm_size_(node_data_elm_size), reduction_elm_size_(reduction_elm_size), node_num_dims_(node_num_dims), node_coordinates_(node_coordinates), coordinate_size_(coordinate_size), num_procs_(num_procs), num_iters_(num_iters), parameter_size_(parameter_size), current_iter_(0), parameter_(parameter) {} void IrregularRuntime::IrregularInit() { //=====reorder input data to reduce crossing edges int *partitions = (int *)malloc(sizeof(int)*node_num_dims_); memset(partitions, 0, sizeof(int)*node_num_dims_); partitions[0] = num_procs_; partitioner_ = new Reorder(global_num_nodes_, global_num_edges_, node_num_dims_, partitions); if(coordinate_size_==4) partitioner_->partition<float>((float *)node_coordinates_); else if(coordinate_size_==8) partitioner_->partition<double>((double *)node_coordinates_); partitioner_->reorder_edges(global_edges_); partitioner_->reorder_satellite_data(global_node_data_, node_data_elm_size_); //========= //MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_); printf("my rank is: 
%d\n", my_rank_); MPI_Barrier(MPI_COMM_WORLD); this->num_gpus_ = GetGPUNumber(); this->num_devices_ = num_gpus_ + 1; pv_ = new partition_view(num_procs_, my_rank_, global_num_nodes_, global_num_edges_, global_edges_, global_edge_data_, global_node_data_, edge_data_elm_size_, node_data_elm_size_, reduction_elm_size_); //printf("pv_ created...\n"); //create per node partition p_mpi_ = pv_->CreatePartition(); MPI_Barrier(MPI_COMM_WORLD); //printf("p_mpi created...\n"); hipHostMalloc((void **)&part_index, sizeof(int) * p_mpi_->my_num_nodes(), hipHostMallocPortable|hipHostMallocMapped); hipHostGetDevicePointer((void **)&part_index_d, part_index, 0); //init part_index //allocate reduction result space reduction_result_ = malloc(reduction_elm_size_ * p_mpi_->my_num_nodes()); p_cpu_ = NULL; //allocate gpu partiiton pointers p_cuda_ = (partition_cuda**)malloc(num_gpus_ * sizeof(partition_cuda*)); memset(p_cuda_, 0, num_gpus_ * sizeof(partition_cuda *)); speeds_ = (double *)malloc(sizeof(double) * num_devices_); device_node_start_ = (int *)malloc(sizeof(int)*num_devices_); device_node_sum_ = (int *)malloc(sizeof(int)*num_devices_); //initial speeds are equal for(int i = 0; i < num_devices_; i++) { speeds_[i] = 1; } //initialize gpu reduction objects rog_ = (Gobject **)malloc(sizeof(Gobject *)*num_gpus_); task_offset_d_ = (int **)malloc(sizeof(int *)*num_gpus_); parameter_d_ = (void **)malloc(sizeof(void *)*num_gpus_); map_idx_ = 0; reduce_idx_ = 0; Gobject *rogh = (Gobject *)malloc(sizeof(Gobject)); for(int i = 0; i < NUM_BUCKETS_G; i++) { (rogh->keys)[i] = EMPTY_BUCKET_VALUE; //(rogh->values)[i] = EMPTY_BUCKET_VALUE; (rogh->locks)[i] = 0; (rogh->pairs_per_bucket)[i] = 0; } //printf("here........\n"); for(int i = 0; i < num_gpus_; i++) { //printf("gpu id: %d+++++++++\n", i); CUDA_SAFE_CALL(hipSetDevice(i)); hipMalloc((void **)&rog_[i], sizeof(Gobject)); hipMemcpy(rog_[i], rogh, sizeof(Gobject), hipMemcpyHostToDevice); int init_val = 0; hipMalloc((void **)&task_offset_d_[i], 
sizeof(int)); hipMemcpy(task_offset_d_[i], &init_val, sizeof(int), hipMemcpyHostToDevice); //printf("copy task offset done++++++++++\n"); //copy parameters hipMalloc((void **)&parameter_d_[i], parameter_size_); hipMemcpy(parameter_d_[i], parameter_, parameter_size_, hipMemcpyHostToDevice); //printf("copy parameter done++++++++++\n"); } free(rogh); //initialize reduction objects for CPU cores hipHostMalloc((void **)&roc_, sizeof(Cobject) * CPU_THREADS, hipHostMallocMapped); for(int i = 0; i < NUM_BUCKETS_G; i++) { (roc_[0].keys)[i] = EMPTY_BUCKET_VALUE; //(roc->values)[i] = EMPTY_BUCKET_VALUE; (roc_[0].locks)[i] = 0; (roc_[0].pairs_per_bucket)[i] = 0; } for(int i = 1; i<CPU_THREADS; i++) { memcpy(&roc_[i], &roc_[0], sizeof(Cobject)); } cpu_edge_offset_ = (int *)malloc(sizeof(int)); *cpu_edge_offset_ = 0; } void IrregularRuntime::split() { //split the reduction space based on the speed of each device //split into cpu partition and gpu partitions //Partitioner partitioner(part_index, p_mpi_->my_num_nodes(), p_mpi_->my_num_edges(), num_devices_, speeds_, reduction_elm_size_, node_num_dims_, my_rank_); Partitioner partitioner(part_index, p_mpi_->my_own_num_nodes(), p_mpi_->my_num_edges(), num_devices_, speeds_, reduction_elm_size_, node_num_dims_, my_rank_); printf("my num nodes: %d\n", p_mpi_->my_num_nodes()); //printf("coordinate size: %d****************\n", coordinate_size_); if(coordinate_size_ == 4) partitioner.partition_device_nodes<float>((float *)node_coordinates_ + pv_->my_node_start() * node_num_dims_); else if(coordinate_size_ == 8) partitioner.partition_device_nodes<double>((double *)node_coordinates_ + pv_->my_node_start() * node_num_dims_); //printf("before edges generatedoooooooooooooooooooooooooooo...................\n"); partitioner.generate_device_edges(p_mpi_->my_edges()); //partitioner.reorder_satellite_data(p_mpi_->my_node_data(), node_data_elm_size_); //printf("edges generatedoooooooooooooooooooooooooooo...................\n"); if(p_cpu_) { delete 
p_cpu_; printf("CPU deleted....\n"); } p_cpu_ = new partition_cpu ( p_mpi_->my_num_nodes(), p_mpi_->my_node_data(), partitioner.get_cpu_num_edges(), partitioner.get_cpu_edges(), p_mpi_->my_edge_data(), p_mpi_->node_data_elm_size(), p_mpi_->edge_data_elm_size(), p_mpi_->reduction_elm_size(), partitioner.get_cpu_num_parts(), partitioner.get_cpu_parts() ); for(int i = 0; i < num_devices_; i++) { device_node_start_[i] = partitioner.get_node_start(i); device_node_sum_[i] = partitioner.get_node_sum(i); } //gpu partitions for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); if(p_cuda_[i]) { delete p_cuda_[i]; printf("GPU %d deleted....\n", i); } p_cuda_[i] = new partition_cuda (p_mpi_->my_num_nodes(), p_mpi_->my_node_data(), p_mpi_->my_node_data_d(), partitioner.get_gpu_num_edges()[i], partitioner.get_gpu_edges()[i], p_mpi_->my_edge_data(), p_mpi_->node_data_elm_size(), p_mpi_->edge_data_elm_size(), p_mpi_->reduction_elm_size(), partitioner.get_gpu_num_parts()[i], partitioner.get_gpu_parts()[i]); p_cuda_[i]->Allocate(); } } void *IrregularRuntime::launch(void *arg) { IrregularRuntime *runtime = (IrregularRuntime *)arg; pthread_mutex_t mutex; pthread_mutex_init(&mutex, NULL); //create CPU_THREAD threads //pthread_t tid[CPU_THREADS]; CpuArgs args[CPU_THREADS]; pthread_t tid[CPU_THREADS]; double before_launch = rtclock(); for(int i = 0; i < CPU_THREADS; i++) { args[i].tid = i; args[i].runtime = runtime; args[i].mutex = &mutex; pthread_create(&tid[i], NULL, compute_cpu, &args[i]); } for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } double after_launch = rtclock(); printf("CPU time: %f\n", after_launch - before_launch); //runtime->speeds_[0] = sqrt(1/(after_launch - before_launch)); runtime->speeds_[0] = (1/(after_launch - before_launch))/3; return (void *)0; } void IrregularRuntime::IrregularStart() { if(current_iter_!=0) { re_init(); } if(current_iter_==0||current_iter_==1) { split(); printf("%d split done....\n", my_rank_); } 
if(current_iter_==0||current_iter_==1) { //printf("oooooooooooo exchanging halo size ooooooooooooo\n"); pv_ -> exchange_halo_size_info(); //printf("oooooooooooo exchanging halo size info done...oooooooooooo\n"); pv_ -> exchange_halo_node_info(); } printf("oooooooooooo exchanging halo node data ooooooooooooo\n"); double before_halo = rtclock(); pv_ -> exchange_halo_node_data(p_mpi_); double after_halo = rtclock(); printf("=============>exchange halo time: %f\n", after_halo - before_halo); //printf("+O+O+O+O+O+O+O exchange done +O+O+O+O+O+O\n"); //launch CPU pthread_t tid; pthread_create(&tid, NULL, launch, this); //launch GPU double before_gpu = rtclock(); for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); hipLaunchKernelGGL(( compute_gpu), dim3(grid), dim3(block), 0, 0, p_cuda_[i]->my_parts_d(), p_cuda_[i]->my_node_data_device(), part_index_d, p_cuda_[i]->my_edges_d(), parameter_d_[i], //TASK OFFSET task_offset_d_[i], p_cuda_[i]->my_num_parts(), rog_[i], map_idx_, reduce_idx_ ); } for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); hipDeviceSynchronize(); } checkCUDAError("~~~internal error checking..."); double after_gpu = rtclock(); printf("gpu time: %f\n", after_gpu - before_gpu); for(int i = 0; i < num_gpus_; i++) { //speeds_[i + 1] = sqrt(1/(after_gpu - before_gpu)); speeds_[i + 1] = (1/(after_gpu - before_gpu)); } pthread_join(tid, NULL); current_iter_++; } void IrregularRuntime::get_reduction_result(void *buffer) { //first, combine cpu reduction objects merge_cobjects(); //then, copy cpu result memcpy(reduction_result_, &(roc_[0].values), sizeof(VALUE)*device_node_sum_[0]); //then, copy gpu results for(int i = 0; i < num_gpus_; i++) { hipSetDevice(i); hipMemcpy((char *)reduction_result_ + device_node_start_[i+1] * reduction_elm_size_, &(rog_[i]->values), device_node_sum_[i+1]*reduction_elm_size_, hipMemcpyDeviceToHost); } memcpy((char *)buffer + 
pv_->my_node_start()*reduction_elm_size_, reduction_result_, reduction_elm_size_ * pv_->my_num_nodes()); } void IrregularRuntime::merge_cobjects() { pthread_t tid[CPU_THREADS]; struct merge_args merge_args[CPU_THREADS]; for(int j = 1; j < 8; j++) { for(int i = 0; i < CPU_THREADS; i++) { merge_args[i].tid = i; merge_args[i].roc1 = &roc_[0]; merge_args[i].roc1 = &roc_[j]; pthread_create(&tid[i], NULL, mergetc, &merge_args[i]); } //join the threads for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } } } void IrregularRuntime::reset_node_data(void *node_data) { memcpy(p_mpi_->my_node_data(), (char *)node_data + pv_->my_node_start()*node_data_elm_size_, pv_->my_num_nodes() * reduction_elm_size_); } void IrregularRuntime::IrregularBarrier() { MPI_Barrier(MPI_COMM_WORLD); } IrregularRuntime::~IrregularRuntime() { delete pv_; delete p_mpi_; delete p_cpu_; delete [] p_cuda_; delete [] rog_; for(int i = 0; i < num_gpus_; i++) { hipFree(rog_[i]); } } void IrregularRuntime::re_init() { int init_val = 0; for(int i = 0; i < num_gpus_; i++) { hipSetDevice(i); hipMemcpy(task_offset_d_[i], &init_val, sizeof(int), hipMemcpyHostToDevice); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); hipLaunchKernelGGL(( init_rog), dim3(grid), dim3(block), 0, 0, rog_[i]); hipDeviceSynchronize(); } //init cpu related *cpu_edge_offset_ = 0; pthread_t tid[CPU_THREADS]; struct init_args init_args[CPU_THREADS]; for(int i = 0; i < CPU_THREADS; i++) { init_args[i].tid = i; init_args[i].roc = &roc_[i]; pthread_create(&tid[i], NULL, init_roc, &init_args[i]); } //join the threads for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } } void IrregularRuntime::set_map_idx(int idx) { this->map_idx_ = idx; } void IrregularRuntime::set_reduce_idx(int idx) { this->reduce_idx_ = idx; }
bae6a791ae2b9f1ffcf7f67944c4c306c6407ee5.cu
#include "partitioner.cpp" #include "irregular_runtime.h" #include "../lib/cu_util.h" #include "partition_cuda.h" #include "partition_cpu.h" #include "cpu_args.h" #include "parameters.h" #include "gpu_kernel.cu" #include <mpi.h> #include "../lib/macro.h" #include "cpu_kernel.h" #include <stdio.h> #include "reorder.cpp" #include "../lib/time_util.h" #include <math.h> IrregularRuntime::IrregularRuntime(IRIndex num_edges, IRIndex num_nodes, EDGE *edges, void *edge_data, void *node_data, int edge_data_elm_size, int node_data_elm_size, int reduction_elm_size, int node_num_dims, void *node_coordinates, int coordinate_size, void *parameter, int parameter_size, int num_procs, int num_iters): global_num_edges_(num_edges), global_num_nodes_(num_nodes), global_edges_(edges), global_edge_data_(edge_data), global_node_data_(node_data), edge_data_elm_size_(edge_data_elm_size), node_data_elm_size_(node_data_elm_size), reduction_elm_size_(reduction_elm_size), node_num_dims_(node_num_dims), node_coordinates_(node_coordinates), coordinate_size_(coordinate_size), num_procs_(num_procs), num_iters_(num_iters), parameter_size_(parameter_size), current_iter_(0), parameter_(parameter) {} void IrregularRuntime::IrregularInit() { //=====reorder input data to reduce crossing edges int *partitions = (int *)malloc(sizeof(int)*node_num_dims_); memset(partitions, 0, sizeof(int)*node_num_dims_); partitions[0] = num_procs_; partitioner_ = new Reorder(global_num_nodes_, global_num_edges_, node_num_dims_, partitions); if(coordinate_size_==4) partitioner_->partition<float>((float *)node_coordinates_); else if(coordinate_size_==8) partitioner_->partition<double>((double *)node_coordinates_); partitioner_->reorder_edges(global_edges_); partitioner_->reorder_satellite_data(global_node_data_, node_data_elm_size_); //========= //MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_); printf("my rank is: %d\n", my_rank_); MPI_Barrier(MPI_COMM_WORLD); this->num_gpus_ = GetGPUNumber(); 
this->num_devices_ = num_gpus_ + 1; pv_ = new partition_view(num_procs_, my_rank_, global_num_nodes_, global_num_edges_, global_edges_, global_edge_data_, global_node_data_, edge_data_elm_size_, node_data_elm_size_, reduction_elm_size_); //printf("pv_ created...\n"); //create per node partition p_mpi_ = pv_->CreatePartition(); MPI_Barrier(MPI_COMM_WORLD); //printf("p_mpi created...\n"); cudaHostAlloc((void **)&part_index, sizeof(int) * p_mpi_->my_num_nodes(), cudaHostAllocPortable|cudaHostAllocMapped); cudaHostGetDevicePointer((void **)&part_index_d, part_index, 0); //init part_index //allocate reduction result space reduction_result_ = malloc(reduction_elm_size_ * p_mpi_->my_num_nodes()); p_cpu_ = NULL; //allocate gpu partiiton pointers p_cuda_ = (partition_cuda**)malloc(num_gpus_ * sizeof(partition_cuda*)); memset(p_cuda_, 0, num_gpus_ * sizeof(partition_cuda *)); speeds_ = (double *)malloc(sizeof(double) * num_devices_); device_node_start_ = (int *)malloc(sizeof(int)*num_devices_); device_node_sum_ = (int *)malloc(sizeof(int)*num_devices_); //initial speeds are equal for(int i = 0; i < num_devices_; i++) { speeds_[i] = 1; } //initialize gpu reduction objects rog_ = (Gobject **)malloc(sizeof(Gobject *)*num_gpus_); task_offset_d_ = (int **)malloc(sizeof(int *)*num_gpus_); parameter_d_ = (void **)malloc(sizeof(void *)*num_gpus_); map_idx_ = 0; reduce_idx_ = 0; Gobject *rogh = (Gobject *)malloc(sizeof(Gobject)); for(int i = 0; i < NUM_BUCKETS_G; i++) { (rogh->keys)[i] = EMPTY_BUCKET_VALUE; //(rogh->values)[i] = EMPTY_BUCKET_VALUE; (rogh->locks)[i] = 0; (rogh->pairs_per_bucket)[i] = 0; } //printf("here........\n"); for(int i = 0; i < num_gpus_; i++) { //printf("gpu id: %d+++++++++\n", i); CUDA_SAFE_CALL(cudaSetDevice(i)); cudaMalloc((void **)&rog_[i], sizeof(Gobject)); cudaMemcpy(rog_[i], rogh, sizeof(Gobject), cudaMemcpyHostToDevice); int init_val = 0; cudaMalloc((void **)&task_offset_d_[i], sizeof(int)); cudaMemcpy(task_offset_d_[i], &init_val, sizeof(int), 
cudaMemcpyHostToDevice); //printf("copy task offset done++++++++++\n"); //copy parameters cudaMalloc((void **)&parameter_d_[i], parameter_size_); cudaMemcpy(parameter_d_[i], parameter_, parameter_size_, cudaMemcpyHostToDevice); //printf("copy parameter done++++++++++\n"); } free(rogh); //initialize reduction objects for CPU cores cudaHostAlloc((void **)&roc_, sizeof(Cobject) * CPU_THREADS, cudaHostAllocMapped); for(int i = 0; i < NUM_BUCKETS_G; i++) { (roc_[0].keys)[i] = EMPTY_BUCKET_VALUE; //(roc->values)[i] = EMPTY_BUCKET_VALUE; (roc_[0].locks)[i] = 0; (roc_[0].pairs_per_bucket)[i] = 0; } for(int i = 1; i<CPU_THREADS; i++) { memcpy(&roc_[i], &roc_[0], sizeof(Cobject)); } cpu_edge_offset_ = (int *)malloc(sizeof(int)); *cpu_edge_offset_ = 0; } void IrregularRuntime::split() { //split the reduction space based on the speed of each device //split into cpu partition and gpu partitions //Partitioner partitioner(part_index, p_mpi_->my_num_nodes(), p_mpi_->my_num_edges(), num_devices_, speeds_, reduction_elm_size_, node_num_dims_, my_rank_); Partitioner partitioner(part_index, p_mpi_->my_own_num_nodes(), p_mpi_->my_num_edges(), num_devices_, speeds_, reduction_elm_size_, node_num_dims_, my_rank_); printf("my num nodes: %d\n", p_mpi_->my_num_nodes()); //printf("coordinate size: %d****************\n", coordinate_size_); if(coordinate_size_ == 4) partitioner.partition_device_nodes<float>((float *)node_coordinates_ + pv_->my_node_start() * node_num_dims_); else if(coordinate_size_ == 8) partitioner.partition_device_nodes<double>((double *)node_coordinates_ + pv_->my_node_start() * node_num_dims_); //printf("before edges generatedoooooooooooooooooooooooooooo...................\n"); partitioner.generate_device_edges(p_mpi_->my_edges()); //partitioner.reorder_satellite_data(p_mpi_->my_node_data(), node_data_elm_size_); //printf("edges generatedoooooooooooooooooooooooooooo...................\n"); if(p_cpu_) { delete p_cpu_; printf("CPU deleted....\n"); } p_cpu_ = new 
partition_cpu ( p_mpi_->my_num_nodes(), p_mpi_->my_node_data(), partitioner.get_cpu_num_edges(), partitioner.get_cpu_edges(), p_mpi_->my_edge_data(), p_mpi_->node_data_elm_size(), p_mpi_->edge_data_elm_size(), p_mpi_->reduction_elm_size(), partitioner.get_cpu_num_parts(), partitioner.get_cpu_parts() ); for(int i = 0; i < num_devices_; i++) { device_node_start_[i] = partitioner.get_node_start(i); device_node_sum_[i] = partitioner.get_node_sum(i); } //gpu partitions for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); if(p_cuda_[i]) { delete p_cuda_[i]; printf("GPU %d deleted....\n", i); } p_cuda_[i] = new partition_cuda (p_mpi_->my_num_nodes(), p_mpi_->my_node_data(), p_mpi_->my_node_data_d(), partitioner.get_gpu_num_edges()[i], partitioner.get_gpu_edges()[i], p_mpi_->my_edge_data(), p_mpi_->node_data_elm_size(), p_mpi_->edge_data_elm_size(), p_mpi_->reduction_elm_size(), partitioner.get_gpu_num_parts()[i], partitioner.get_gpu_parts()[i]); p_cuda_[i]->Allocate(); } } void *IrregularRuntime::launch(void *arg) { IrregularRuntime *runtime = (IrregularRuntime *)arg; pthread_mutex_t mutex; pthread_mutex_init(&mutex, NULL); //create CPU_THREAD threads //pthread_t tid[CPU_THREADS]; CpuArgs args[CPU_THREADS]; pthread_t tid[CPU_THREADS]; double before_launch = rtclock(); for(int i = 0; i < CPU_THREADS; i++) { args[i].tid = i; args[i].runtime = runtime; args[i].mutex = &mutex; pthread_create(&tid[i], NULL, compute_cpu, &args[i]); } for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } double after_launch = rtclock(); printf("CPU time: %f\n", after_launch - before_launch); //runtime->speeds_[0] = sqrt(1/(after_launch - before_launch)); runtime->speeds_[0] = (1/(after_launch - before_launch))/3; return (void *)0; } void IrregularRuntime::IrregularStart() { if(current_iter_!=0) { re_init(); } if(current_iter_==0||current_iter_==1) { split(); printf("%d split done....\n", my_rank_); } if(current_iter_==0||current_iter_==1) { //printf("oooooooooooo 
exchanging halo size ooooooooooooo\n"); pv_ -> exchange_halo_size_info(); //printf("oooooooooooo exchanging halo size info done...oooooooooooo\n"); pv_ -> exchange_halo_node_info(); } printf("oooooooooooo exchanging halo node data ooooooooooooo\n"); double before_halo = rtclock(); pv_ -> exchange_halo_node_data(p_mpi_); double after_halo = rtclock(); printf("=============>exchange halo time: %f\n", after_halo - before_halo); //printf("+O+O+O+O+O+O+O exchange done +O+O+O+O+O+O\n"); //launch CPU pthread_t tid; pthread_create(&tid, NULL, launch, this); //launch GPU double before_gpu = rtclock(); for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); compute_gpu<<<grid, block, 0>>> ( p_cuda_[i]->my_parts_d(), p_cuda_[i]->my_node_data_device(), part_index_d, p_cuda_[i]->my_edges_d(), parameter_d_[i], //TASK OFFSET task_offset_d_[i], p_cuda_[i]->my_num_parts(), rog_[i], map_idx_, reduce_idx_ ); } for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); cudaDeviceSynchronize(); } checkCUDAError("~~~internal error checking..."); double after_gpu = rtclock(); printf("gpu time: %f\n", after_gpu - before_gpu); for(int i = 0; i < num_gpus_; i++) { //speeds_[i + 1] = sqrt(1/(after_gpu - before_gpu)); speeds_[i + 1] = (1/(after_gpu - before_gpu)); } pthread_join(tid, NULL); current_iter_++; } void IrregularRuntime::get_reduction_result(void *buffer) { //first, combine cpu reduction objects merge_cobjects(); //then, copy cpu result memcpy(reduction_result_, &(roc_[0].values), sizeof(VALUE)*device_node_sum_[0]); //then, copy gpu results for(int i = 0; i < num_gpus_; i++) { cudaSetDevice(i); cudaMemcpy((char *)reduction_result_ + device_node_start_[i+1] * reduction_elm_size_, &(rog_[i]->values), device_node_sum_[i+1]*reduction_elm_size_, cudaMemcpyDeviceToHost); } memcpy((char *)buffer + pv_->my_node_start()*reduction_elm_size_, reduction_result_, reduction_elm_size_ * pv_->my_num_nodes()); } 
void IrregularRuntime::merge_cobjects() { pthread_t tid[CPU_THREADS]; struct merge_args merge_args[CPU_THREADS]; for(int j = 1; j < 8; j++) { for(int i = 0; i < CPU_THREADS; i++) { merge_args[i].tid = i; merge_args[i].roc1 = &roc_[0]; merge_args[i].roc1 = &roc_[j]; pthread_create(&tid[i], NULL, mergetc, &merge_args[i]); } //join the threads for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } } } void IrregularRuntime::reset_node_data(void *node_data) { memcpy(p_mpi_->my_node_data(), (char *)node_data + pv_->my_node_start()*node_data_elm_size_, pv_->my_num_nodes() * reduction_elm_size_); } void IrregularRuntime::IrregularBarrier() { MPI_Barrier(MPI_COMM_WORLD); } IrregularRuntime::~IrregularRuntime() { delete pv_; delete p_mpi_; delete p_cpu_; delete [] p_cuda_; delete [] rog_; for(int i = 0; i < num_gpus_; i++) { cudaFree(rog_[i]); } } void IrregularRuntime::re_init() { int init_val = 0; for(int i = 0; i < num_gpus_; i++) { cudaSetDevice(i); cudaMemcpy(task_offset_d_[i], &init_val, sizeof(int), cudaMemcpyHostToDevice); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); init_rog<<<grid, block>>>(rog_[i]); cudaThreadSynchronize(); } //init cpu related *cpu_edge_offset_ = 0; pthread_t tid[CPU_THREADS]; struct init_args init_args[CPU_THREADS]; for(int i = 0; i < CPU_THREADS; i++) { init_args[i].tid = i; init_args[i].roc = &roc_[i]; pthread_create(&tid[i], NULL, init_roc, &init_args[i]); } //join the threads for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } } void IrregularRuntime::set_map_idx(int idx) { this->map_idx_ = idx; } void IrregularRuntime::set_reduce_idx(int idx) { this->reduce_idx_ = idx; }
392505a76a2a1a25b3b11c53d10f8fc07814c1de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void computeGradient_gpu( const float *center, const float *neighbour1, const float *neighbour2, const float *neighbour3, const float *cellCenter, const float *nb1Center, const float *nb2Center, const float *nb3Center, float *q, float *out) { if(center[0]> EPS_cuda){ float total, Rhs[8]; float dh[3], dz[3],du[3], dv[3], weights[3]; float Gram[2][2], inverse[2][2], delta[3][2]; float x = cellCenter[0]; float y = cellCenter[1]; delta[0][0] = (nb1Center[0] - x); delta[0][1] = (nb1Center[1] - y); delta[1][0] = (nb2Center[0] - x); delta[1][1] = (nb2Center[1] - y); if( (cellCenter[0] != nb3Center[0]) && (cellCenter[1] != nb3Center[1])){ delta[2][0] = (nb3Center[0] - x); delta[2][1] = (nb3Center[1] - y); } else { delta[2][0] = 0.5f*(delta[0][0] + delta[1][0]); delta[2][1] = 0.5f*(delta[0][1] + delta[1][1]); } weights[0] = sqrt(delta[0][0] * delta[0][0] + delta[0][1] * delta[0][1]); weights[1] = sqrt(delta[1][0] * delta[1][0] + delta[1][1] * delta[1][1]); weights[2] = sqrt(delta[2][0] * delta[2][0] + delta[2][1] * delta[2][1]); total = weights[0] + weights[1] + weights[2]; weights[0] = total/weights[0]; weights[1] = total/weights[1]; weights[2] = total/weights[2]; delta[0][0] *= weights[0]; delta[0][1] *= weights[0]; delta[1][0] *= weights[1]; delta[1][1] *= weights[1]; delta[2][0] *= weights[2]; delta[2][1] *= weights[2]; Gram[0][0] = ((delta[0][0]*delta[0][0]) + (delta[1][0] *delta[1][0]) + (delta[2][0] *delta[2][0])); Gram[0][1] = ((delta[0][0]*delta[0][1]) + (delta[1][0] *delta[1][1]) + (delta[2][0] *delta[2][1])); Gram[1][0] = ((delta[0][0]*delta[0][1]) + (delta[1][0] *delta[1][1]) + (delta[2][0] *delta[2][1])); Gram[1][1] = ((delta[0][1]*delta[0][1]) + (delta[1][1] *delta[1][1]) + (delta[2][1] *delta[2][1])); float det = 1.0 / (Gram[0][0]*Gram[1][1] - Gram[0][1]*Gram[1][0]); inverse[0][0] = det * Gram[1][1]; 
inverse[0][1] = det * (- Gram[0][1]); inverse[1][0] = det * (-Gram[1][0]); inverse[1][1] = det * Gram[0][0]; dh[0] = neighbour1[0] - center[0]; dh[1] = neighbour2[0] - center[0]; dh[2] = neighbour3[0] - center[0]; dh[0] *= weights[0]; dh[1] *= weights[1]; dh[2] *= weights[2]; dz[0] = neighbour1[3] - center[3]; dz[1] = neighbour2[3] - center[3]; dz[2] = neighbour3[3] - center[3]; dz[0] *= weights[0]; dz[1] *= weights[1]; dz[2] *= weights[2]; du[0] = neighbour1[1] - center[1]; du[1] = neighbour2[1] - center[1]; du[2] = neighbour3[1] - center[1]; du[0] *= weights[0]; du[1] *= weights[1]; du[2] *= weights[2]; dv[0] = neighbour1[2] - center[2]; dv[1] = neighbour2[2] - center[2]; dv[2] = neighbour3[2] - center[2]; dv[0] *= weights[0]; dv[1] *= weights[1]; dv[2] *= weights[2]; Rhs[0] = (delta[0][0]*dh[0]) + (delta[1][0]*dh[1]) + (delta[2][0]*dh[2]); Rhs[1] = (delta[0][1]*dh[0]) + (delta[1][1]*dh[1]) + (delta[2][1]*dh[2]); out[0] = (inverse[0][0] * Rhs[0]) + (inverse[0][1] * Rhs[1]); out[1] = (inverse[1][0] * Rhs[0]) + (inverse[1][1] * Rhs[1]); Rhs[2] = (delta[0][0]*du[0]) + (delta[1][0]*du[1]) + (delta[2][0]*du[2]); Rhs[3] = (delta[0][1]*du[0]) + (delta[1][1]*du[1]) + (delta[2][1]*du[2]); out[2] = (inverse[0][0] * Rhs[2]) + (inverse[0][1] * Rhs[3]); out[3] = (inverse[1][0] * Rhs[2]) + (inverse[1][1] * Rhs[3]); Rhs[4] = (delta[0][0]*dv[0]) + (delta[1][0]*dv[1]) + (delta[2][0]*dv[2]); Rhs[5] = (delta[0][1]*dv[0]) + (delta[1][1]*dv[1]) + (delta[2][1]*dv[2]); out[4] = (inverse[0][0] * Rhs[4]) + (inverse[0][1] * Rhs[5]); out[5] = (inverse[1][0] * Rhs[4]) + (inverse[1][1] * Rhs[5]); Rhs[6] = (delta[0][0]*dz[0]) + (delta[1][0]*dz[1]) + (delta[2][0]*dz[2]); Rhs[7] = (delta[0][1]*dz[0]) + (delta[1][1]*dz[1]) + (delta[2][1]*dz[2]); out[6] = (inverse[0][0] * Rhs[6]) + (inverse[0][1] * Rhs[7]); out[7] = (inverse[1][0] * Rhs[6]) + (inverse[1][1] * Rhs[7]); if((isnan(out[0])) || (isnan(out[1])) || (isnan(out[2])) || (isnan(out[3])) || (isnan(out[4])) || (isnan(out[5])) || 
(isnan(out[6])) || (isnan(out[7]))){ out[0] = 0.0f; out[1] = 0.0f; out[2] = 0.0f; out[3] = 0.0f; out[4] = 0.0f; out[5] = 0.0f; out[6] = 0.0f; out[7] = 0.0f; } } else { out[0] = 0.0f; out[1] = 0.0f; out[2] = 0.0f; out[3] = 0.0f; out[4] = 0.0f; out[5] = 0.0f; out[6] = 0.0f; out[7] = 0.0f; } q[0] = center[0] < neighbour1[0] ? center[0] : neighbour1[0]; q[0] = q[0] < neighbour2[0] ? q[0] : neighbour2[0]; q[0] = q[0] < neighbour3[0] ? q[0] : neighbour3[0]; q[1] = center[0] > neighbour1[0] ? center[0] : neighbour1[0]; q[1] = q[1] > neighbour2[0] ? q[1] : neighbour2[0]; q[1] = q[1] > neighbour3[0] ? q[1] : neighbour3[0]; q[2] = center[1] < neighbour1[1] ? center[1] : neighbour1[1]; q[2] = q[2] < neighbour2[1] ? q[2] : neighbour2[1]; q[2] = q[2] < neighbour3[1] ? q[2] : neighbour3[1]; q[3] = center[1] > neighbour1[1] ? center[1] : neighbour1[1]; q[3] = q[3] > neighbour2[1] ? q[3] : neighbour2[1]; q[3] = q[3] > neighbour3[1] ? q[3] : neighbour3[1]; q[4] = center[2] < neighbour1[2] ? center[2] : neighbour1[2]; q[4] = q[4] < neighbour2[2] ? q[4] : neighbour2[2]; q[4] = q[4] < neighbour3[2] ? q[4] : neighbour3[2]; q[5] = center[2] > neighbour1[2] ? center[2] : neighbour1[2]; q[5] = q[5] > neighbour2[2] ? q[5] : neighbour2[2]; q[5] = q[5] > neighbour3[2] ? q[5] : neighbour3[2]; q[6] = center[3] < neighbour1[3] ? center[3] : neighbour1[3]; q[6] = q[6] < neighbour2[3] ? q[6] : neighbour2[3]; q[6] = q[6] < neighbour3[3] ? q[6] : neighbour3[3]; q[7] = center[3] > neighbour1[3] ? center[3] : neighbour1[3]; q[7] = q[7] > neighbour2[3] ? q[7] : neighbour2[3]; q[7] = q[7] > neighbour3[3] ? 
q[7] : neighbour3[3]; } // CUDA kernel function __global__ void op_cuda_computeGradient( const float *__restrict ind_arg0, const float *__restrict ind_arg1, float *__restrict ind_arg2, const int *__restrict opDat1Map, const float *__restrict arg0, const float *__restrict arg4, float *arg8, float *arg9, int start, int end, int *col_reord, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = col_reord[tid + start]; //initialise local variables int map1idx; int map2idx; int map3idx; map1idx = opDat1Map[n + set_size * 0]; map2idx = opDat1Map[n + set_size * 1]; map3idx = opDat1Map[n + set_size * 2]; //user-supplied kernel call computeGradient_gpu(arg0+n*4, ind_arg0+map1idx*4, ind_arg0+map2idx*4, ind_arg0+map3idx*4, arg4+n*2, ind_arg1+map1idx*2, ind_arg1+map2idx*2, ind_arg2+map3idx*2, arg8+n*8, arg9+n*8); } } //host stub function void op_par_loop_computeGradient(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7, op_arg arg8, op_arg arg9){ int nargs = 10; op_arg args[10]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; args[8] = arg8; args[9] = arg9; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(22); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[22].name = name; OP_kernels[22].count += 1; int ninds = 3; int inds[10] = {-1,0,0,0,-1,1,1,2,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: computeGradient\n"); } //get plan #ifdef OP_PART_SIZE_22 int part_size = OP_PART_SIZE_22; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_COLOR2); //execute plan for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_grouped(nargs, args, 2); 
} #ifdef OP_BLOCK_SIZE_22 int nthread = OP_BLOCK_SIZE_22; #else int nthread = OP_block_size; #endif int start = Plan->col_offsets[0][col]; int end = Plan->col_offsets[0][col+1]; int nblocks = (end - start - 1)/nthread + 1; hipLaunchKernelGGL(( op_cuda_computeGradient), dim3(nblocks),dim3(nthread), 0, 0, (float *)arg1.data_d, (float *)arg5.data_d, (float *)arg7.data_d, arg1.map_data_d, (float*)arg0.data_d, (float*)arg4.data_d, (float*)arg8.data_d, (float*)arg9.data_d, start, end, Plan->col_reord, set->size+set->exec_size); } OP_kernels[22].transfer += Plan->transfer; OP_kernels[22].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[22].time += wall_t2 - wall_t1; }
392505a76a2a1a25b3b11c53d10f8fc07814c1de.cu
// // auto-generated by op2.py // //user function __device__ void computeGradient_gpu( const float *center, const float *neighbour1, const float *neighbour2, const float *neighbour3, const float *cellCenter, const float *nb1Center, const float *nb2Center, const float *nb3Center, float *q, float *out) { if(center[0]> EPS_cuda){ float total, Rhs[8]; float dh[3], dz[3],du[3], dv[3], weights[3]; float Gram[2][2], inverse[2][2], delta[3][2]; float x = cellCenter[0]; float y = cellCenter[1]; delta[0][0] = (nb1Center[0] - x); delta[0][1] = (nb1Center[1] - y); delta[1][0] = (nb2Center[0] - x); delta[1][1] = (nb2Center[1] - y); if( (cellCenter[0] != nb3Center[0]) && (cellCenter[1] != nb3Center[1])){ delta[2][0] = (nb3Center[0] - x); delta[2][1] = (nb3Center[1] - y); } else { delta[2][0] = 0.5f*(delta[0][0] + delta[1][0]); delta[2][1] = 0.5f*(delta[0][1] + delta[1][1]); } weights[0] = sqrt(delta[0][0] * delta[0][0] + delta[0][1] * delta[0][1]); weights[1] = sqrt(delta[1][0] * delta[1][0] + delta[1][1] * delta[1][1]); weights[2] = sqrt(delta[2][0] * delta[2][0] + delta[2][1] * delta[2][1]); total = weights[0] + weights[1] + weights[2]; weights[0] = total/weights[0]; weights[1] = total/weights[1]; weights[2] = total/weights[2]; delta[0][0] *= weights[0]; delta[0][1] *= weights[0]; delta[1][0] *= weights[1]; delta[1][1] *= weights[1]; delta[2][0] *= weights[2]; delta[2][1] *= weights[2]; Gram[0][0] = ((delta[0][0]*delta[0][0]) + (delta[1][0] *delta[1][0]) + (delta[2][0] *delta[2][0])); Gram[0][1] = ((delta[0][0]*delta[0][1]) + (delta[1][0] *delta[1][1]) + (delta[2][0] *delta[2][1])); Gram[1][0] = ((delta[0][0]*delta[0][1]) + (delta[1][0] *delta[1][1]) + (delta[2][0] *delta[2][1])); Gram[1][1] = ((delta[0][1]*delta[0][1]) + (delta[1][1] *delta[1][1]) + (delta[2][1] *delta[2][1])); float det = 1.0 / (Gram[0][0]*Gram[1][1] - Gram[0][1]*Gram[1][0]); inverse[0][0] = det * Gram[1][1]; inverse[0][1] = det * (- Gram[0][1]); inverse[1][0] = det * (-Gram[1][0]); inverse[1][1] = det * 
Gram[0][0]; dh[0] = neighbour1[0] - center[0]; dh[1] = neighbour2[0] - center[0]; dh[2] = neighbour3[0] - center[0]; dh[0] *= weights[0]; dh[1] *= weights[1]; dh[2] *= weights[2]; dz[0] = neighbour1[3] - center[3]; dz[1] = neighbour2[3] - center[3]; dz[2] = neighbour3[3] - center[3]; dz[0] *= weights[0]; dz[1] *= weights[1]; dz[2] *= weights[2]; du[0] = neighbour1[1] - center[1]; du[1] = neighbour2[1] - center[1]; du[2] = neighbour3[1] - center[1]; du[0] *= weights[0]; du[1] *= weights[1]; du[2] *= weights[2]; dv[0] = neighbour1[2] - center[2]; dv[1] = neighbour2[2] - center[2]; dv[2] = neighbour3[2] - center[2]; dv[0] *= weights[0]; dv[1] *= weights[1]; dv[2] *= weights[2]; Rhs[0] = (delta[0][0]*dh[0]) + (delta[1][0]*dh[1]) + (delta[2][0]*dh[2]); Rhs[1] = (delta[0][1]*dh[0]) + (delta[1][1]*dh[1]) + (delta[2][1]*dh[2]); out[0] = (inverse[0][0] * Rhs[0]) + (inverse[0][1] * Rhs[1]); out[1] = (inverse[1][0] * Rhs[0]) + (inverse[1][1] * Rhs[1]); Rhs[2] = (delta[0][0]*du[0]) + (delta[1][0]*du[1]) + (delta[2][0]*du[2]); Rhs[3] = (delta[0][1]*du[0]) + (delta[1][1]*du[1]) + (delta[2][1]*du[2]); out[2] = (inverse[0][0] * Rhs[2]) + (inverse[0][1] * Rhs[3]); out[3] = (inverse[1][0] * Rhs[2]) + (inverse[1][1] * Rhs[3]); Rhs[4] = (delta[0][0]*dv[0]) + (delta[1][0]*dv[1]) + (delta[2][0]*dv[2]); Rhs[5] = (delta[0][1]*dv[0]) + (delta[1][1]*dv[1]) + (delta[2][1]*dv[2]); out[4] = (inverse[0][0] * Rhs[4]) + (inverse[0][1] * Rhs[5]); out[5] = (inverse[1][0] * Rhs[4]) + (inverse[1][1] * Rhs[5]); Rhs[6] = (delta[0][0]*dz[0]) + (delta[1][0]*dz[1]) + (delta[2][0]*dz[2]); Rhs[7] = (delta[0][1]*dz[0]) + (delta[1][1]*dz[1]) + (delta[2][1]*dz[2]); out[6] = (inverse[0][0] * Rhs[6]) + (inverse[0][1] * Rhs[7]); out[7] = (inverse[1][0] * Rhs[6]) + (inverse[1][1] * Rhs[7]); if((isnan(out[0])) || (isnan(out[1])) || (isnan(out[2])) || (isnan(out[3])) || (isnan(out[4])) || (isnan(out[5])) || (isnan(out[6])) || (isnan(out[7]))){ out[0] = 0.0f; out[1] = 0.0f; out[2] = 0.0f; out[3] = 0.0f; out[4] = 
0.0f; out[5] = 0.0f; out[6] = 0.0f; out[7] = 0.0f; } } else { out[0] = 0.0f; out[1] = 0.0f; out[2] = 0.0f; out[3] = 0.0f; out[4] = 0.0f; out[5] = 0.0f; out[6] = 0.0f; out[7] = 0.0f; } q[0] = center[0] < neighbour1[0] ? center[0] : neighbour1[0]; q[0] = q[0] < neighbour2[0] ? q[0] : neighbour2[0]; q[0] = q[0] < neighbour3[0] ? q[0] : neighbour3[0]; q[1] = center[0] > neighbour1[0] ? center[0] : neighbour1[0]; q[1] = q[1] > neighbour2[0] ? q[1] : neighbour2[0]; q[1] = q[1] > neighbour3[0] ? q[1] : neighbour3[0]; q[2] = center[1] < neighbour1[1] ? center[1] : neighbour1[1]; q[2] = q[2] < neighbour2[1] ? q[2] : neighbour2[1]; q[2] = q[2] < neighbour3[1] ? q[2] : neighbour3[1]; q[3] = center[1] > neighbour1[1] ? center[1] : neighbour1[1]; q[3] = q[3] > neighbour2[1] ? q[3] : neighbour2[1]; q[3] = q[3] > neighbour3[1] ? q[3] : neighbour3[1]; q[4] = center[2] < neighbour1[2] ? center[2] : neighbour1[2]; q[4] = q[4] < neighbour2[2] ? q[4] : neighbour2[2]; q[4] = q[4] < neighbour3[2] ? q[4] : neighbour3[2]; q[5] = center[2] > neighbour1[2] ? center[2] : neighbour1[2]; q[5] = q[5] > neighbour2[2] ? q[5] : neighbour2[2]; q[5] = q[5] > neighbour3[2] ? q[5] : neighbour3[2]; q[6] = center[3] < neighbour1[3] ? center[3] : neighbour1[3]; q[6] = q[6] < neighbour2[3] ? q[6] : neighbour2[3]; q[6] = q[6] < neighbour3[3] ? q[6] : neighbour3[3]; q[7] = center[3] > neighbour1[3] ? center[3] : neighbour1[3]; q[7] = q[7] > neighbour2[3] ? q[7] : neighbour2[3]; q[7] = q[7] > neighbour3[3] ? 
q[7] : neighbour3[3]; } // CUDA kernel function __global__ void op_cuda_computeGradient( const float *__restrict ind_arg0, const float *__restrict ind_arg1, float *__restrict ind_arg2, const int *__restrict opDat1Map, const float *__restrict arg0, const float *__restrict arg4, float *arg8, float *arg9, int start, int end, int *col_reord, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = col_reord[tid + start]; //initialise local variables int map1idx; int map2idx; int map3idx; map1idx = opDat1Map[n + set_size * 0]; map2idx = opDat1Map[n + set_size * 1]; map3idx = opDat1Map[n + set_size * 2]; //user-supplied kernel call computeGradient_gpu(arg0+n*4, ind_arg0+map1idx*4, ind_arg0+map2idx*4, ind_arg0+map3idx*4, arg4+n*2, ind_arg1+map1idx*2, ind_arg1+map2idx*2, ind_arg2+map3idx*2, arg8+n*8, arg9+n*8); } } //host stub function void op_par_loop_computeGradient(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7, op_arg arg8, op_arg arg9){ int nargs = 10; op_arg args[10]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; args[8] = arg8; args[9] = arg9; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(22); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[22].name = name; OP_kernels[22].count += 1; int ninds = 3; int inds[10] = {-1,0,0,0,-1,1,1,2,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: computeGradient\n"); } //get plan #ifdef OP_PART_SIZE_22 int part_size = OP_PART_SIZE_22; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_COLOR2); //execute plan for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_grouped(nargs, args, 2); 
} #ifdef OP_BLOCK_SIZE_22 int nthread = OP_BLOCK_SIZE_22; #else int nthread = OP_block_size; #endif int start = Plan->col_offsets[0][col]; int end = Plan->col_offsets[0][col+1]; int nblocks = (end - start - 1)/nthread + 1; op_cuda_computeGradient<<<nblocks,nthread>>>( (float *)arg1.data_d, (float *)arg5.data_d, (float *)arg7.data_d, arg1.map_data_d, (float*)arg0.data_d, (float*)arg4.data_d, (float*)arg8.data_d, (float*)arg9.data_d, start, end, Plan->col_reord, set->size+set->exec_size); } OP_kernels[22].transfer += Plan->transfer; OP_kernels[22].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[22].time += wall_t2 - wall_t1; }
7066d6c5953f08b4340bd3c1c13ca2c9a6390e6e.hip
// !!! This is a file automatically generated by hipify!!! /* * Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one) * DCT/DST and IDCT/IDST I ---> IV * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=Discrete_Transform(A, , type of Transform (sine or cosine), type of Transform(direct/inverse), type of DCT/DST or IDCT/IDST, dimensions). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include "Discrete_Transform_kernel.cuh" #include "DCT_I_Column.cuh" #include "DCT_I_Row.cuh" #include "DCT_I_Column_Inverse.cuh" #include "DCT_I_Row_Inverse.cuh" #include "DCT_II_Row.cuh" #include "DCT_II_Row_Inverse.cuh" #include "DCT_II_Column.cuh" #include "DCT_II_Column_Inverse.cuh" #include "DCT_III_Row.cuh" #include "DCT_III_Row_Inverse.cuh" #include "DCT_III_Column.cuh" #include "DCT_III_Column_Inverse.cuh" #include "DCT_IV_Row.cuh" #include "DCT_IV_Row_Inverse.cuh" #include "DCT_IV_Column.cuh" #include "DCT_IV_Column_Inverse.cuh" #include "DST_I_Column.cuh" #include "DST_I_Row.cuh" #include "DST_I_Column_Inverse.cuh" #include "DST_I_Row_Inverse.cuh" #include "DST_II_Row.cuh" #include "DST_II_Row_Inverse.cuh" #include "DST_II_Column.cuh" #include "DST_II_Column_Inverse.cuh" #include "DST_III_Row.cuh" #include "DST_III_Row_Inverse.cuh" #include "DST_III_Column.cuh" #include "DST_III_Column_Inverse.cuh" #include "DST_IV_Row.cuh" #include "DST_IV_Row_Inverse.cuh" #include "DST_IV_Column.cuh" #include "DST_IV_Column_Inverse.cuh" #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) #define TILE_DIM 16 // DCT extern "C" void 
CalculateTransformDCTColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowFour(double * A, double * C, int numARows, int 
numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); // DST extern "C" void CalculateTransformDSTColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void 
CalculateTransformDSTInverseColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } //Discrete_Transform(x, 'cosine', 'direct', 'one' , 'column') char row[] = "row"; char column[] = "column"; char one[] = "one"; char two[] = "two"; char three[] = "three"; char four[] = "four"; char direct[] = "direct"; char inverse[] = "inverse"; char cosine[] = "cosine"; char sine[] = "sine"; //char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be five."; if ((nrhs!=5)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(prhs[0]); char *input_buf1; input_buf1 = mxArrayToString(prhs[1]); char *input_buf2; input_buf2 = mxArrayToString(prhs[2]); char *input_buf3; input_buf3 = mxArrayToString(prhs[3]); char *input_buf4; input_buf4 = mxArrayToString(prhs[4]); if ((mxIsChar(prhs[0]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } if (!(mxIsChar(prhs[1]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[2]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[3]))){ mexErrMsgIdAndTxt( 
"MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[4]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be of type string.\n."); } ///////// input_buf0=FIRST, SECOND, THIRD, FOURTH, FIFTH if ((strcmp (cosine,input_buf1) != 0) &&(strcmp (sine,input_buf1) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be 'cosine' or 'sine' not %s\n",input_buf1); } if ((strcmp (direct,input_buf2) != 0)&& (strcmp (inverse,input_buf2) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be 'direct' or 'inverse' not %s\n",input_buf2); } if ((strcmp (one,input_buf3) != 0)&& (strcmp (two,input_buf3) != 0) && (strcmp (three,input_buf3) != 0) && (strcmp (four,input_buf3) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be 'one' or 'two' or 'three' or 'four' not %s\n",input_buf3); } if ((strcmp (column,input_buf4) != 0)&&(strcmp (row,input_buf4) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be 'column' or 'row' not %s\n",input_buf4); } /////// //mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Input(FIFTH ARGUMENT) must be 'column' or 'row'.\n."); if (strcmp (cosine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of 
columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column /////////////////////////// if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the 
matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; double * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //free(hostB); } }//row /////////////////////////// }// direct if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = 
(int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // one // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns 
= (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); // testing // printf("\n plhs[0]:"); // printf("\n"); // for (int i = 0; i<numCRows; i++){ // for (int j = 0; j<numCColumns; j++){ // printf(" %g ", round (pointer[i * numCColumns + j])); // } // printf("\n"); // } //free(hostB); } }//row } // inverse } // cosine //SINE...................................................................................................................................................... 
if (strcmp (sine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. */ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // hipError_t err1 = 
hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column /////////////////////////// if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix 
C (you have to set this) numCRows = numARows; numCColumns = numAColumns; double * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //free(hostB); } }//row /////////////////////////// }// direct if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = 
(int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // one // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = 
(int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); // testing // printf("\n plhs[0]:"); // printf("\n"); // for (int i = 0; i<numCRows; i++){ // for (int j = 0; j<numCColumns; j++){ // printf(" %g ", round (pointer[i * numCColumns + j])); // } // printf("\n"); // } //free(hostB); } }//row } // inverse } //sine /////////////END }
/* ==== begin file: 7066d6c5953f08b4340bd3c1c13ca2c9a6390e6e.cu ==== */
/* * Discrete Cosine/Sine Transform(DCT/DST and IDCT/IDST one to four-all in one) * DCT/DST and IDCT/IDST I ---> IV * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=Discrete_Transform(A, , type of Transform (sine or cosine), type of Transform(direct/inverse), type of DCT/DST or IDCT/IDST, dimensions). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include "Discrete_Transform_kernel.cuh" #include "DCT_I_Column.cuh" #include "DCT_I_Row.cuh" #include "DCT_I_Column_Inverse.cuh" #include "DCT_I_Row_Inverse.cuh" #include "DCT_II_Row.cuh" #include "DCT_II_Row_Inverse.cuh" #include "DCT_II_Column.cuh" #include "DCT_II_Column_Inverse.cuh" #include "DCT_III_Row.cuh" #include "DCT_III_Row_Inverse.cuh" #include "DCT_III_Column.cuh" #include "DCT_III_Column_Inverse.cuh" #include "DCT_IV_Row.cuh" #include "DCT_IV_Row_Inverse.cuh" #include "DCT_IV_Column.cuh" #include "DCT_IV_Column_Inverse.cuh" #include "DST_I_Column.cuh" #include "DST_I_Row.cuh" #include "DST_I_Column_Inverse.cuh" #include "DST_I_Row_Inverse.cuh" #include "DST_II_Row.cuh" #include "DST_II_Row_Inverse.cuh" #include "DST_II_Column.cuh" #include "DST_II_Column_Inverse.cuh" #include "DST_III_Row.cuh" #include "DST_III_Row_Inverse.cuh" #include "DST_III_Column.cuh" #include "DST_III_Column_Inverse.cuh" #include "DST_IV_Row.cuh" #include "DST_IV_Row_Inverse.cuh" #include "DST_IV_Column.cuh" #include "DST_IV_Column_Inverse.cuh" #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) #define TILE_DIM 16 // DCT extern "C" void CalculateTransformDCTColumnOne(double * A, double * C, int numARows, int 
numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTInverseColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDCTRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void 
CalculateTransformDCTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); // DST extern "C" void CalculateTransformDSTColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowOne(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowThree(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTColumnFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseColumnFour(double * A, double * C, int numARows, 
int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); extern "C" void CalculateTransformDSTInverseRowFour(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } //Discrete_Transform(x, 'cosine', 'direct', 'one' , 'column') char row[] = "row"; char column[] = "column"; char one[] = "one"; char two[] = "two"; char three[] = "three"; char four[] = "four"; char direct[] = "direct"; char inverse[] = "inverse"; char cosine[] = "cosine"; char sine[] = "sine"; //char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be five."; if ((nrhs!=5)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(prhs[0]); char *input_buf1; input_buf1 = mxArrayToString(prhs[1]); char *input_buf2; input_buf2 = mxArrayToString(prhs[2]); char *input_buf3; input_buf3 = mxArrayToString(prhs[3]); char *input_buf4; input_buf4 = mxArrayToString(prhs[4]); if ((mxIsChar(prhs[0]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } if (!(mxIsChar(prhs[1]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[2]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be of type string.\n."); } if (!(mxIsChar(prhs[3]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be of type string.\n."); 
} if (!(mxIsChar(prhs[4]))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be of type string.\n."); } ///////// input_buf0=FIRST, SECOND, THIRD, FOURTH, FIFTH if ((strcmp (cosine,input_buf1) != 0) &&(strcmp (sine,input_buf1) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be 'cosine' or 'sine' not %s\n",input_buf1); } if ((strcmp (direct,input_buf2) != 0)&& (strcmp (inverse,input_buf2) != 0) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be 'direct' or 'inverse' not %s\n",input_buf2); } if ((strcmp (one,input_buf3) != 0)&& (strcmp (two,input_buf3) != 0) && (strcmp (three,input_buf3) != 0) && (strcmp (four,input_buf3) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FOURTH ARGUMENT) must be 'one' or 'two' or 'three' or 'four' not %s\n",input_buf3); } if ((strcmp (column,input_buf4) != 0)&&(strcmp (row,input_buf4) != 0)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIFTH ARGUMENT) must be 'column' or 'row' not %s\n",input_buf4); } /////// //mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Input(FIFTH ARGUMENT) must be 'column' or 'row'.\n."); if (strcmp (cosine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of 
columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Cosine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column /////////////////////////// if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the 
matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; double * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Cosine Transform in column wise \n"); return; } hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //free(hostB); } }//row /////////////////////////// }// direct if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns 
= (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Cosine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DCTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DCTII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DCTIII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DCTIV_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // one // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int 
numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Cosine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); if (strcmp (one,input_buf3) == 0) { CalculateTransformDCTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDCTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDCTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDCTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); // testing // printf("\n plhs[0]:"); // printf("\n"); // for (int i = 0; i<numCRows; i++){ // for (int j = 0; j<numCColumns; j++){ // printf(" %g ", round (pointer[i * numCColumns + j])); // } // printf("\n"); // } //free(hostB); } }//row } // inverse } // cosine //SINE...................................................................................................................................................... 
if (strcmp (sine,input_buf1) == 0) { if (strcmp (direct,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. */ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // cudaError_t err1 = 
cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column /////////////////////////// if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix 
C (you have to set this) numCRows = numARows; numCColumns = numAColumns; double * hostA ; // The A matrix if (numAColumns==1) { printf("Attention, this is a column vector, please try Discrete Sine Transform in column wise \n"); return; } hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //free(hostB); } }//row /////////////////////////// }// direct if (strcmp (inverse,input_buf2) == 0) { if (strcmp (column,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = 
(int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseColumnOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseColumnTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseColumnThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseColumnFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //free(hostB); } } // Column if (strcmp (row,input_buf4) == 0) { /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray *B; double const *d_A; double *d_B; int numARows, numAColumns, numCRows, numCColumns; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; if (strcmp (one,input_buf3) == 0) { DSTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { DSTII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { DSTIII_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { DSTIV_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_B, numARows, numAColumns, numCRows, numCColumns); } // one // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int 
numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) if (numAColumns==1) { printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n"); return; } numCRows = numARows; numCColumns = numAColumns; //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix hostA = (double *)mxGetData(prhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); if (strcmp (one,input_buf3) == 0) { CalculateTransformDSTInverseRowOne(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (two,input_buf3) == 0) { CalculateTransformDSTInverseRowTwo(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (three,input_buf3) == 0) { CalculateTransformDSTInverseRowThree(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } if (strcmp (four,input_buf3) == 0) { CalculateTransformDSTInverseRowFour(hostA, pointer, numARows, numAColumns, numCRows, numCColumns); } //one //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); // testing // printf("\n plhs[0]:"); // printf("\n"); // for (int i = 0; i<numCRows; i++){ // for (int j = 0; j<numCColumns; j++){ // printf(" %g ", round (pointer[i * numCColumns + j])); // } // printf("\n"); // } //free(hostB); } }//row } // inverse } //sine /////////////END }
927b1368adabb80f62713673a1eb2774ad8a7b6a.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
927b1368adabb80f62713673a1eb2774ad8a7b6a.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
331bf7285df8b651eaee9adcec6a7747821471f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #define BLOCKDIM 16 __global__ void matrixVecMul_kernel(mint * out, mint * mat, mint * vec, mint matWidth, mint matHeight) { int ty = threadIdx.y, by = blockIdx.y; int tx = threadIdx.x, bx = blockIdx.x; int xIndex = tx + bx*BLOCKDIM; int yIndex = ty + by*BLOCKDIM; int vec_index = xIndex; #if 0 // atomic operators are only supported on compute 1.1 and above __shared__ mint smem[BLOCKDIM]; int mat_index = xIndex + yIndex*matWidth; if (ty == 0) smem[tx] = vec[vec_index]; __syncthreads(); if (xIndex < matWidth && yIndex < matHeight) { atomicAdd(&out[vec_index], mat[mat_index]*smem[tx]); } #else mint * mat_row = &mat[xIndex*matWidth]; int ii; mint accum; if (xIndex < matHeight) { for (ii = 0, accum = 0; ii < matWidth; ii++) accum += mat_row[ii]*vec[ii]; out[vec_index] = accum; } #endif }
331bf7285df8b651eaee9adcec6a7747821471f0.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #define BLOCKDIM 16 __global__ void matrixVecMul_kernel(mint * out, mint * mat, mint * vec, mint matWidth, mint matHeight) { int ty = threadIdx.y, by = blockIdx.y; int tx = threadIdx.x, bx = blockIdx.x; int xIndex = tx + bx*BLOCKDIM; int yIndex = ty + by*BLOCKDIM; int vec_index = xIndex; #if 0 // atomic operators are only supported on compute 1.1 and above __shared__ mint smem[BLOCKDIM]; int mat_index = xIndex + yIndex*matWidth; if (ty == 0) smem[tx] = vec[vec_index]; __syncthreads(); if (xIndex < matWidth && yIndex < matHeight) { atomicAdd(&out[vec_index], mat[mat_index]*smem[tx]); } #else mint * mat_row = &mat[xIndex*matWidth]; int ii; mint accum; if (xIndex < matHeight) { for (ii = 0, accum = 0; ii < matWidth; ii++) accum += mat_row[ii]*vec[ii]; out[vec_index] = accum; } #endif }
c9c2356d449be783a98ed88a5efe884ae66df11d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <assert.h> __global__ void Asum(int *a, int *b, int *c){ *c = *a + *b; }
c9c2356d449be783a98ed88a5efe884ae66df11d.cu
#include <stdio.h> #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <assert.h> __global__ void Asum(int *a, int *b, int *c){ *c = *a + *b; }
e31095c5d04b43f42f78951861b3a207119357d5.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <hip/hip_runtime.h> #include <cassert> #include <ctime> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <cmath> #include <string.h> #define DOMINIO 10000 #define SUBDOMINIO 1000 // = DOMINIO / BLOCO #define BLOCOS 10 #define M 10 //Tamanho do padrao #define N 100 //Tamanho da linha #define LINHAS 100 //Linhas por bloco = threads por bloco #define TAMLINHA 100 #define CHECK_ERROR(call) do { \ if( hipSuccess != call) { \ std::cerr << std::endl << "CUDA ERRO: " << \ hipGetErrorString(call) << " in file: " << __FILE__ \ << " in line: " << __LINE__ << std::endl; \ exit(0); \ } } while (0) //Lendo sequencia e padrao a partir de um arquivo __host__ void le_sequencia(char *nome_arquivo, char *seq, int tam) { FILE *arq; arq = fopen(nome_arquivo, "r"); fscanf(arq, "%s", seq); } //Calcula qual o avano de acordo com a localizacao do caracter no padrao __device__ int ord(char *padrao, char c) { int i = M - 1; while(padrao[i] != c && i >= 0) i--; if(i >= 0) return i; else return M - 1; } __global__ void kernel(char *texto, char *padrao, int *res) { int thread = blockDim.x * blockIdx.x + threadIdx.x; int d[M]; int i = 0, k, j; int a = 1; k = SUBDOMINIO * blockDim.x; //Pre-processamento for (j = 0; j < M; j++) d[j] = M; for (j = 0; j < M - 1; j++) { d[ord(padrao, padrao[j])] = M - a; a++; } i = (thread * TAMLINHA) + M; //C e F sao o inicio e o fim de cada linha, pra evitar que uma thread acesse a linha da outra thread int c = thread * TAMLINHA; int f = (thread * TAMLINHA) + TAMLINHA; while ((i <= f) && ( i > c)) { k = i - 1; j = M - 1; while ((j > 0) && (texto[k] == padrao[j])) { k -= 1; j -= 1; } if (j == 0 && (texto[k] == padrao[j])) res[k] = 1; a = ord(padrao, texto[i-1]); i = i + d[a]; } } using namespace std; int main (int argc, char **argv) { hipEvent_t e_Start, e_Stop; float elapsedTime = 0.0f; //Criando os vetores - Device char *d_Texto = 
NULL, *d_Padrao = NULL; int *d_resultado = NULL; //Vetores - Host char h_Texto[DOMINIO], h_Padrao[M]; int h_resultado[DOMINIO]; le_sequencia("dna.txt", h_Texto, DOMINIO); le_sequencia("padrao_dna.txt", h_Padrao, M); memset(h_resultado, 0, DOMINIO * sizeof(int)); unsigned int qtdeDados = DOMINIO * sizeof(char); //Aloca memria GPU CHECK_ERROR(hipMalloc((void**) &d_Texto, DOMINIO * sizeof(char))); CHECK_ERROR(hipMalloc((void**) &d_Padrao, M * sizeof(char))); CHECK_ERROR(hipMalloc((void**) &d_resultado, DOMINIO * sizeof(int))); //Copiando o texto da CPU -> GPU CHECK_ERROR(hipMemcpy(d_Texto, h_Texto , DOMINIO * sizeof(char), hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_Padrao, h_Padrao, M * sizeof(char), hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_resultado, h_resultado, DOMINIO * sizeof(int), hipMemcpyHostToDevice)); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); cout << "\n\n Algoritmo Boyer Moore Horspool\n\n\n"; //Dados do Problema cout << "::Dados do Problema::\n" << endl; cout << "Tamanho do texto: " << DOMINIO << " caracteres" << endl; cout << "Blocos: " << BLOCOS << endl; cout << "Threads: " << LINHAS << endl; cout << "Padrao: " << h_Padrao << endl; //Reset no device CHECK_ERROR(hipDeviceReset()); //Criando eventos CHECK_ERROR(hipEventCreate(&e_Start)); CHECK_ERROR(hipEventCreate(&e_Stop)); //Alocando memria em GPU CHECK_ERROR(hipMalloc(reinterpret_cast<void**> (&d_Texto), qtdeDados)); CHECK_ERROR(hipMalloc(reinterpret_cast<void**> (&d_Padrao), M * sizeof(char))); CHECK_ERROR(hipMalloc(reinterpret_cast<void**> (&d_resultado), DOMINIO * sizeof(int))); CHECK_ERROR(hipEventRecord(e_Start, hipEventDefault)); //Lanando o KERNEL hipLaunchKernelGGL(( kernel), dim3(BLOCOS), dim3(LINHAS), 1, 0, d_Texto, d_Padrao, d_resultado); CHECK_ERROR(hipDeviceSynchronize()); //GPU -> CPU CHECK_ERROR(hipMemcpy(h_resultado, d_resultado, DOMINIO * sizeof(int), hipMemcpyDeviceToHost)); CHECK_ERROR(hipEventRecord(e_Stop, hipEventDefault)); 
CHECK_ERROR(hipEventSynchronize(e_Stop)); CHECK_ERROR(hipEventElapsedTime(&elapsedTime, e_Start, e_Stop)); cout << "Tempo de execucao: " << elapsedTime / 1000.0f << " (s) \n\n\n"; //Resultado for(int k = 0; k < DOMINIO; k++) if(h_resultado[k] == 1) cout << "Ocorrencia em: " << k << endl; CHECK_ERROR(hipEventDestroy(e_Start)); CHECK_ERROR(hipEventDestroy(e_Stop)); cout << "\nFIM\n"; return EXIT_SUCCESS; }
e31095c5d04b43f42f78951861b3a207119357d5.cu
#include <iostream> #include <cstdlib> #include <cuda_runtime.h> #include <cassert> #include <ctime> #include <curand.h> #include <curand_kernel.h> #include <cmath> #include <string.h> #define DOMINIO 10000 #define SUBDOMINIO 1000 // = DOMINIO / BLOCO #define BLOCOS 10 #define M 10 //Tamanho do padrao #define N 100 //Tamanho da linha #define LINHAS 100 //Linhas por bloco = threads por bloco #define TAMLINHA 100 #define CHECK_ERROR(call) do { \ if( cudaSuccess != call) { \ std::cerr << std::endl << "CUDA ERRO: " << \ cudaGetErrorString(call) << " in file: " << __FILE__ \ << " in line: " << __LINE__ << std::endl; \ exit(0); \ } } while (0) //Lendo sequencia e padrao a partir de um arquivo __host__ void le_sequencia(char *nome_arquivo, char *seq, int tam) { FILE *arq; arq = fopen(nome_arquivo, "r"); fscanf(arq, "%s", seq); } //Calcula qual o avanço de acordo com a localizacao do caracter no padrao __device__ int ord(char *padrao, char c) { int i = M - 1; while(padrao[i] != c && i >= 0) i--; if(i >= 0) return i; else return M - 1; } __global__ void kernel(char *texto, char *padrao, int *res) { int thread = blockDim.x * blockIdx.x + threadIdx.x; int d[M]; int i = 0, k, j; int a = 1; k = SUBDOMINIO * blockDim.x; //Pre-processamento for (j = 0; j < M; j++) d[j] = M; for (j = 0; j < M - 1; j++) { d[ord(padrao, padrao[j])] = M - a; a++; } i = (thread * TAMLINHA) + M; //C e F sao o inicio e o fim de cada linha, pra evitar que uma thread acesse a linha da outra thread int c = thread * TAMLINHA; int f = (thread * TAMLINHA) + TAMLINHA; while ((i <= f) && ( i > c)) { k = i - 1; j = M - 1; while ((j > 0) && (texto[k] == padrao[j])) { k -= 1; j -= 1; } if (j == 0 && (texto[k] == padrao[j])) res[k] = 1; a = ord(padrao, texto[i-1]); i = i + d[a]; } } using namespace std; int main (int argc, char **argv) { cudaEvent_t e_Start, e_Stop; float elapsedTime = 0.0f; //Criando os vetores - Device char *d_Texto = NULL, *d_Padrao = NULL; int *d_resultado = NULL; //Vetores - Host char 
h_Texto[DOMINIO], h_Padrao[M]; int h_resultado[DOMINIO]; le_sequencia("dna.txt", h_Texto, DOMINIO); le_sequencia("padrao_dna.txt", h_Padrao, M); memset(h_resultado, 0, DOMINIO * sizeof(int)); unsigned int qtdeDados = DOMINIO * sizeof(char); //Aloca memória GPU CHECK_ERROR(cudaMalloc((void**) &d_Texto, DOMINIO * sizeof(char))); CHECK_ERROR(cudaMalloc((void**) &d_Padrao, M * sizeof(char))); CHECK_ERROR(cudaMalloc((void**) &d_resultado, DOMINIO * sizeof(int))); //Copiando o texto da CPU -> GPU CHECK_ERROR(cudaMemcpy(d_Texto, h_Texto , DOMINIO * sizeof(char), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_Padrao, h_Padrao, M * sizeof(char), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_resultado, h_resultado, DOMINIO * sizeof(int), cudaMemcpyHostToDevice)); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); cout << "\n\n Algoritmo Boyer Moore Horspool\n\n\n"; //Dados do Problema cout << "::Dados do Problema::\n" << endl; cout << "Tamanho do texto: " << DOMINIO << " caracteres" << endl; cout << "Blocos: " << BLOCOS << endl; cout << "Threads: " << LINHAS << endl; cout << "Padrao: " << h_Padrao << endl; //Reset no device CHECK_ERROR(cudaDeviceReset()); //Criando eventos CHECK_ERROR(cudaEventCreate(&e_Start)); CHECK_ERROR(cudaEventCreate(&e_Stop)); //Alocando memória em GPU CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_Texto), qtdeDados)); CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_Padrao), M * sizeof(char))); CHECK_ERROR(cudaMalloc(reinterpret_cast<void**> (&d_resultado), DOMINIO * sizeof(int))); CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault)); //Lançando o KERNEL kernel<<<BLOCOS, LINHAS, 1>>>(d_Texto, d_Padrao, d_resultado); CHECK_ERROR(cudaDeviceSynchronize()); //GPU -> CPU CHECK_ERROR(cudaMemcpy(h_resultado, d_resultado, DOMINIO * sizeof(int), cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault)); CHECK_ERROR(cudaEventSynchronize(e_Stop)); CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, e_Start, 
e_Stop)); cout << "Tempo de execucao: " << elapsedTime / 1000.0f << " (s) \n\n\n"; //Resultado for(int k = 0; k < DOMINIO; k++) if(h_resultado[k] == 1) cout << "Ocorrencia em: " << k << endl; CHECK_ERROR(cudaEventDestroy(e_Start)); CHECK_ERROR(cudaEventDestroy(e_Stop)); cout << "\nFIM\n"; return EXIT_SUCCESS; }
c107788d1d1b3e277cb6b1bd431d17f5cd447d77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include <cstdio> #include <iostream> #include <iomanip> #include <fstream> #include <vector> #include "device_launch_parameters.h" using namespace std; const int MAX_STRING_LENGTH = 256; const int THREADS = 3; const string DATA_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_dat_1.txt"; // 1, 2, 3 const string REZ_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_rez.txt"; // 1, 2, 3 struct BenchmarkGPU { char Name[MAX_STRING_LENGTH]; int MSRP = -1; double Score = -1; char result[MAX_STRING_LENGTH+2]; string toString() { stringstream ss; ss << setw(45) << Name << " | " << setw(6) << MSRP << " | " << setw(8) << Score << " | " << setw(12) << result; return ss.str(); } }; double calculateNew(int x, double y) { return (x / y); } void readGPUFile(BenchmarkGPU *data); void write_results_to_file(BenchmarkGPU* data, int n, const string file_path, const string title); __global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results); __device__ void gpu_memset(char* dest, int add); __device__ void gpu_strcat(char* dest, char* src, int offset); int main() { // Host int n = 25; BenchmarkGPU data[n]; readGPUFile(data); BenchmarkGPU results[n]; int chunk_size = n / THREADS; int count = 0; char* sresults[25]; // GPU BenchmarkGPU* d_all_gpus; int* d_count; int* d_n; int* d_chunk_size; BenchmarkGPU* d_results; char** d_sresults; // Memory allocation for GPU hipMalloc((void**)&d_all_gpus, n * sizeof(BenchmarkGPU)); hipMalloc((void**)&d_results, n * sizeof(BenchmarkGPU)); hipMalloc((void**)&d_count, sizeof(int)); hipMalloc((void**)&d_n, sizeof(int)); hipMalloc((void**)&d_chunk_size, sizeof(int)); // Copies memory from CPU to GPU hipMemcpy(d_all_gpus, data, n * sizeof(BenchmarkGPU), hipMemcpyHostToDevice); hipMemcpy(d_count, &count, 
sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_chunk_size, &chunk_size, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( sum_on_gpu), dim3(1),dim3(THREADS), 0, 0, d_all_gpus, d_count, d_n, d_chunk_size, d_results); hipDeviceSynchronize(); hipMemcpy(&results, d_results, n * sizeof(BenchmarkGPU), hipMemcpyDeviceToHost); hipMemcpy(&count, d_count, 1, hipMemcpyDeviceToHost); hipFree(d_all_gpus); hipFree(d_count); hipFree(d_n); hipFree(d_chunk_size); hipFree(d_results); cout << "Found results: " << count << endl; cout << "Finished" << endl; write_results_to_file(results, count, REZ_FILE, "A dalies rezultatai"); return 0; } /** * GPU * Sums gpus list chunk data properties * @param gpus BenchmarkGPUs list * @param count BenchmarkGPUs list size * @param chunk_size Summed items per thread * @param results Summed chunk results */ __global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results) { int start_index = threadIdx.x * *chunk_size; int end_index = start_index + 1 * *chunk_size; if (threadIdx.x == blockDim.x -1) end_index = *n; printf("Thread: %d Start Index: %d End Index: %d\n", threadIdx.x, start_index, end_index); for (int i = start_index; i < end_index; ++i) { BenchmarkGPU tmp; gpu_memset(tmp.Name,0); gpu_memset(tmp.result,2); tmp.MSRP = 0; tmp.Score = 0.0; gpu_strcat(tmp.Name, gpus[i].Name, 0); tmp.Score = gpus[i].Score; tmp.MSRP = gpus[i].MSRP; double my_number = tmp.MSRP / tmp.Score; char tmp_res[256+2]; gpu_memset(tmp_res, 2); tmp_res[0] = 'F'; tmp_res[1] = '-'; if(my_number < 70) tmp_res[0] = 'E'; if(my_number < 60) tmp_res[0] = 'D'; if(my_number < 50) tmp_res[0] = 'C'; if(my_number < 40) tmp_res[0] = 'B'; if(my_number < 30) tmp_res[0] = 'A'; gpu_strcat(tmp_res, gpus[i].Name, 2); printf("Thread: %d Brand: %d\n", threadIdx.x, tmp_res); gpu_strcat(tmp.result, tmp_res,0); if(tmp.result[0] < 'F') { int index = atomicAdd(count, 1); results[index] = tmp; } 
// printf("Thread: %d Index: %d Brand: %s Make Year: %d Mileage: %f\n", threadIdx.x, index, results[index].Name, results[index].Score, results[index].MSRP); } } /** * Appends char array to other char array * @param dest Destination array * @param src Source array */ __device__ void gpu_strcat(char* dest, char* src, int offset) { int i = 0; do { dest[offset + i] = src[i];} while (src[i++] != 0 && i + offset != MAX_STRING_LENGTH+offset); } /** * Zeroes all char memory * @param dest Char array */ __device__ void gpu_memset(char* dest, int add) { for (int i = 0; i < MAX_STRING_LENGTH + add; ++i) { dest[i] = 0; } } void readGPUFile(BenchmarkGPU *data) { string line; ifstream myfile; myfile.open(DATA_FILE); if(!myfile.is_open()) { perror("Error open"); exit(EXIT_FAILURE); } int ch = 0; int count = 0; while(getline(myfile, line)) { string::size_type pos; pos=line.find(' ',0); line = line.substr(pos+1); switch (ch) { case 0: strcpy(data[count].Name, line.c_str()); break; case 1: data[count].MSRP = stoi(line); break; case 2: data[count].Score = stoi(line); count++; ch = -1; break; } ch++; } } /** * Writes given monitor cars formatted in table to file * @param cars Cars list * @param file_path Result file path * @param title Results table title */ void write_results_to_file(BenchmarkGPU* gpus, int n, const string file_path, const string title) { ofstream file; file.open(file_path); file << setw(80) << title << endl << "------------------------------------------------------------------------------------------------------------------------" << endl << setw(45) << "Name" << " | " << setw(6) << "MSRP" << " | " << setw(8) << "Score" << " | " << setw(20) << "Result" << endl << "------------------------------------------------------------------------------------------------------------------------" << endl; for (int i = 0; i < n; ++i) { file << gpus[i].toString() << endl; } file << endl << endl << endl; }
c107788d1d1b3e277cb6b1bd431d17f5cd447d77.cu
#include "cuda_runtime.h" #include <cuda.h> #include <cstdio> #include <iostream> #include <iomanip> #include <fstream> #include <vector> #include "device_launch_parameters.h" using namespace std; const int MAX_STRING_LENGTH = 256; const int THREADS = 3; const string DATA_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_dat_1.txt"; // 1, 2, 3 const string REZ_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_rez.txt"; // 1, 2, 3 struct BenchmarkGPU { char Name[MAX_STRING_LENGTH]; int MSRP = -1; double Score = -1; char result[MAX_STRING_LENGTH+2]; string toString() { stringstream ss; ss << setw(45) << Name << " | " << setw(6) << MSRP << " | " << setw(8) << Score << " | " << setw(12) << result; return ss.str(); } }; double calculateNew(int x, double y) { return (x / y); } void readGPUFile(BenchmarkGPU *data); void write_results_to_file(BenchmarkGPU* data, int n, const string file_path, const string title); __global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results); __device__ void gpu_memset(char* dest, int add); __device__ void gpu_strcat(char* dest, char* src, int offset); int main() { // Host int n = 25; BenchmarkGPU data[n]; readGPUFile(data); BenchmarkGPU results[n]; int chunk_size = n / THREADS; int count = 0; char* sresults[25]; // GPU BenchmarkGPU* d_all_gpus; int* d_count; int* d_n; int* d_chunk_size; BenchmarkGPU* d_results; char** d_sresults; // Memory allocation for GPU cudaMalloc((void**)&d_all_gpus, n * sizeof(BenchmarkGPU)); cudaMalloc((void**)&d_results, n * sizeof(BenchmarkGPU)); cudaMalloc((void**)&d_count, sizeof(int)); cudaMalloc((void**)&d_n, sizeof(int)); cudaMalloc((void**)&d_chunk_size, sizeof(int)); // Copies memory from CPU to GPU cudaMemcpy(d_all_gpus, data, n * sizeof(BenchmarkGPU), cudaMemcpyHostToDevice); cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_n, &n, 
sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_chunk_size, &chunk_size, sizeof(int), cudaMemcpyHostToDevice); sum_on_gpu<<<1,THREADS>>>(d_all_gpus, d_count, d_n, d_chunk_size, d_results); cudaDeviceSynchronize(); cudaMemcpy(&results, d_results, n * sizeof(BenchmarkGPU), cudaMemcpyDeviceToHost); cudaMemcpy(&count, d_count, 1, cudaMemcpyDeviceToHost); cudaFree(d_all_gpus); cudaFree(d_count); cudaFree(d_n); cudaFree(d_chunk_size); cudaFree(d_results); cout << "Found results: " << count << endl; cout << "Finished" << endl; write_results_to_file(results, count, REZ_FILE, "A dalies rezultatai"); return 0; } /** * GPU * Sums gpus list chunk data properties * @param gpus BenchmarkGPUs list * @param count BenchmarkGPUs list size * @param chunk_size Summed items per thread * @param results Summed chunk results */ __global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results) { int start_index = threadIdx.x * *chunk_size; int end_index = start_index + 1 * *chunk_size; if (threadIdx.x == blockDim.x -1) end_index = *n; printf("Thread: %d Start Index: %d End Index: %d\n", threadIdx.x, start_index, end_index); for (int i = start_index; i < end_index; ++i) { BenchmarkGPU tmp; gpu_memset(tmp.Name,0); gpu_memset(tmp.result,2); tmp.MSRP = 0; tmp.Score = 0.0; gpu_strcat(tmp.Name, gpus[i].Name, 0); tmp.Score = gpus[i].Score; tmp.MSRP = gpus[i].MSRP; double my_number = tmp.MSRP / tmp.Score; char tmp_res[256+2]; gpu_memset(tmp_res, 2); tmp_res[0] = 'F'; tmp_res[1] = '-'; if(my_number < 70) tmp_res[0] = 'E'; if(my_number < 60) tmp_res[0] = 'D'; if(my_number < 50) tmp_res[0] = 'C'; if(my_number < 40) tmp_res[0] = 'B'; if(my_number < 30) tmp_res[0] = 'A'; gpu_strcat(tmp_res, gpus[i].Name, 2); printf("Thread: %d Brand: %d\n", threadIdx.x, tmp_res); gpu_strcat(tmp.result, tmp_res,0); if(tmp.result[0] < 'F') { int index = atomicAdd(count, 1); results[index] = tmp; } // printf("Thread: %d Index: %d Brand: %s Make Year: %d Mileage: %f\n", 
threadIdx.x, index, results[index].Name, results[index].Score, results[index].MSRP); } } /** * Appends char array to other char array * @param dest Destination array * @param src Source array */ __device__ void gpu_strcat(char* dest, char* src, int offset) { int i = 0; do { dest[offset + i] = src[i];} while (src[i++] != 0 && i + offset != MAX_STRING_LENGTH+offset); } /** * Zeroes all char memory * @param dest Char array */ __device__ void gpu_memset(char* dest, int add) { for (int i = 0; i < MAX_STRING_LENGTH + add; ++i) { dest[i] = 0; } } void readGPUFile(BenchmarkGPU *data) { string line; ifstream myfile; myfile.open(DATA_FILE); if(!myfile.is_open()) { perror("Error open"); exit(EXIT_FAILURE); } int ch = 0; int count = 0; while(getline(myfile, line)) { string::size_type pos; pos=line.find(' ',0); line = line.substr(pos+1); switch (ch) { case 0: strcpy(data[count].Name, line.c_str()); break; case 1: data[count].MSRP = stoi(line); break; case 2: data[count].Score = stoi(line); count++; ch = -1; break; } ch++; } } /** * Writes given monitor cars formatted in table to file * @param cars Cars list * @param file_path Result file path * @param title Results table title */ void write_results_to_file(BenchmarkGPU* gpus, int n, const string file_path, const string title) { ofstream file; file.open(file_path); file << setw(80) << title << endl << "------------------------------------------------------------------------------------------------------------------------" << endl << setw(45) << "Name" << " | " << setw(6) << "MSRP" << " | " << setw(8) << "Score" << " | " << setw(20) << "Result" << endl << "------------------------------------------------------------------------------------------------------------------------" << endl; for (int i = 0; i < n; ++i) { file << gpus[i].toString() << endl; } file << endl << endl << endl; }
68f6a2901f44e0c44c94e2ada20fe52af4dfb38c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Algo to so the weight distribution of 5000 particle on a grid of 64x64 */ //#include<conio.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <sys/time.h> #include "time.h" using namespace std; //struct timeval tv; __global__ void parMap(float *pD, float *netD, int grid) { unsigned int rID= blockDim.x*blockIdx.x + threadIdx.x; int left, right, top, bottom; float x,y, fL,fR,fB,fT; //x=pD[1]; x = pD[rID*2]; //x=102.358000; y = pD[rID*2+1]; //y=320.568000; //printf("%d %f %f ",rID,x,y); //printf("thread: %d x:%f, y:%f \n", rID,x,y); left = (int)floorf(x); right = left + 1; bottom = (int)floorf(y); top = bottom +1; //printf("left:%d, right:%d,top:%d, bottom:%d \n", left, right, top, bottom ); if (left>= grid||right>= grid||top>= grid||bottom>= grid) { left=0; right=1; top=1; bottom = 0; x=0.500000; y=0.500000; } fL = x - left; fR = 1 - fL; fB = y - bottom; fT = 1 - fB; // printf("fL:%f, fR:%f, fT:%f, fB:%f L:%d, R:%d, T:%d, B:%d \n", fL, fR, fT,fB, left, right, top, bottom ); // printf("L:%d, R:%d, T:%d, B:%d \n", left, right, top, bottom ); // printf("grid: left:%f, right:%f, top:%f, bottom:%f \n", netD[left], netD[right], netD[top], netD[bottom]); netD[grid*left + bottom] = netD[grid*left + bottom] +(fT*fR); netD[grid*right + bottom] = netD[grid*right + bottom]+(fT*fL); netD[grid*left+ top] = netD[grid*left + top] +(fB*fR); netD[grid*right+ top] = netD[grid*right + top] +(fB*fL); // if(rID%50==1) // printf("grid: left:%f, right:%f, top:%f, bottom:%f \n", netD[left], netD[right], netD[top], netD[bottom]); } // main function int main(int argc, char *argv[]) { int grid = 1024, i, j, max = grid, sizeGrid= grid*grid; unsigned int par = 1600000, sizePar = 2*par; hipEvent_t s_i, e_i, s_mc_h2d, e_mc_h2d, s_mc_d2h, e_mc_d2h, s_pl, e_pl; float t_i, t_mc_h2d, t_mc_d2h, t_pl; hipEventCreate(&s_i); hipEventCreate(&s_mc_h2d); hipEventCreate(&e_i); 
hipEventCreate(&e_mc_h2d); hipEventCreate(&s_mc_d2h); hipEventCreate(&s_pl); hipEventCreate(&e_mc_d2h); hipEventCreate(&e_pl); /* float* netH; float* pH; float* netD; float* pD; */ float *netH, *pH, *netD, *pD; hipEventRecord(s_i,0); netH = (float*)malloc(sizeof(float)*sizeGrid); pH = (float*)malloc(sizeof(float)*sizePar); //intialising particles. for( i = 0; i < sizePar; i++) pH[i]= ((float)rand()/(float)(RAND_MAX) * (float)(max-1)); // printf("particle initialised \n "); for(i=0;i< grid;i++) for(j=0;j< grid;j++) netH[grid*i+j]=0.0; // printf("Grid initialised \n "); hipEventRecord( e_i,0 ); hipEventSynchronize( e_i ); hipEventElapsedTime( &t_i, s_i, e_i); // Allocating GPU memory hipEventRecord(s_mc_h2d,0); hipMalloc( (void **)&netD, sizeof(float)*sizeGrid); hipMalloc( (void **)&pD, sizeof(float)*sizePar); // printf("Cuda memory allocated \n "); //transfering data to gpu hipMemcpy( pD, pH, sizePar*(sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(netD, netH, sizeGrid*(sizeof(float)), hipMemcpyHostToDevice); hipEventRecord( e_mc_h2d,0 ); hipEventSynchronize( e_mc_h2d ); hipEventElapsedTime( &t_mc_h2d, s_mc_h2d, e_mc_h2d); // printf("Data cpy to gpu \n \n "); //initialising the thread in groups hipEventRecord( s_pl,0 ); dim3 dimBlock(192); dim3 dimGrid((par/192)); // printf("Thread launched \n \n "); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( parMap), dim3(dimGrid), dim3(dimBlock), 0, 0, pD, netD, grid); // printf("Data back to CPU \n \n "); hipEventRecord( e_pl,0 ); hipEventSynchronize( e_pl ); hipEventElapsedTime( &t_pl, s_pl, e_pl); // Copy the results in GPU memory back to the CPU hipEventRecord( s_mc_d2h,0 ); hipMemcpy(netH, netD, sizeof(float)*sizeGrid, hipMemcpyDeviceToHost); hipEventRecord( e_mc_d2h,0 ); hipEventSynchronize( e_mc_d2h ); hipEventElapsedTime( &t_mc_d2h, s_mc_d2h, e_mc_d2h); //!! if(x<0) stop print i //!! 
denominator -- nan FILE *f = fopen("file.txt", "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } //float temp1=par/(sizeGrid); for ( i = 0; i < sizeGrid; ++i) { //cout<<netH[i]<<" "; fprintf (f,"%f ",((netH[i]))) ;// /temp1)); if (i%grid==(grid-1)) { //printf("\n"); fprintf (f," \n" ); } } fclose(f); // cout<<"Grid size: "<<grid<<"x"<<grid<<" particles:"<<par <<"\n"; // cout<<"Initialisation time: "<<t_i<<"\n"; // cout<<"Memory copy H 2 d: "<<t_mc_h2d<<"\n"; // cout<<"Memory copy D 2 H: "<<t_mc_d2h<<"\n"; // cout<<"Processing time: "<<t_pl<<"\n"; // cout<<"Total time: "<<( t_mc_h2d + t_mc_d2h + t_pl )<<"\n"; printf("\nGrid size: \t\t%d \t %d\n", grid,par); printf("\nInitialisation time:\t%f \n", t_i); printf("\nMemory Copy H 2 D:\t%f \n", t_mc_h2d); printf("\nMemory Copy D 2 H:\t%f \n", t_mc_d2h); printf("\nProcessing time:\t%f \n", t_pl); //event destroy hipEventDestroy(s_i); hipEventDestroy(s_mc_h2d); hipEventDestroy(s_mc_d2h); hipEventDestroy(e_i); hipEventDestroy(e_mc_h2d); hipEventDestroy(e_mc_d2h); hipEventDestroy(s_pl); hipEventDestroy(e_pl); // Free the GPU memory hipFree(netD); hipFree(pD); free(netH); free(pH); return 0; }
68f6a2901f44e0c44c94e2ada20fe52af4dfb38c.cu
/* Algo to so the weight distribution of 5000 particle on a grid of 64x64 */ //#include<conio.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <sys/time.h> #include "time.h" using namespace std; //struct timeval tv; __global__ void parMap(float *pD, float *netD, int grid) { unsigned int rID= blockDim.x*blockIdx.x + threadIdx.x; int left, right, top, bottom; float x,y, fL,fR,fB,fT; //x=pD[1]; x = pD[rID*2]; //x=102.358000; y = pD[rID*2+1]; //y=320.568000; //printf("%d %f %f ",rID,x,y); //printf("thread: %d x:%f, y:%f \n", rID,x,y); left = (int)floorf(x); right = left + 1; bottom = (int)floorf(y); top = bottom +1; //printf("left:%d, right:%d,top:%d, bottom:%d \n", left, right, top, bottom ); if (left>= grid||right>= grid||top>= grid||bottom>= grid) { left=0; right=1; top=1; bottom = 0; x=0.500000; y=0.500000; } fL = x - left; fR = 1 - fL; fB = y - bottom; fT = 1 - fB; // printf("fL:%f, fR:%f, fT:%f, fB:%f L:%d, R:%d, T:%d, B:%d \n", fL, fR, fT,fB, left, right, top, bottom ); // printf("L:%d, R:%d, T:%d, B:%d \n", left, right, top, bottom ); // printf("grid: left:%f, right:%f, top:%f, bottom:%f \n", netD[left], netD[right], netD[top], netD[bottom]); netD[grid*left + bottom] = netD[grid*left + bottom] +(fT*fR); netD[grid*right + bottom] = netD[grid*right + bottom]+(fT*fL); netD[grid*left+ top] = netD[grid*left + top] +(fB*fR); netD[grid*right+ top] = netD[grid*right + top] +(fB*fL); // if(rID%50==1) // printf("grid: left:%f, right:%f, top:%f, bottom:%f \n", netD[left], netD[right], netD[top], netD[bottom]); } // main function int main(int argc, char *argv[]) { int grid = 1024, i, j, max = grid, sizeGrid= grid*grid; unsigned int par = 1600000, sizePar = 2*par; cudaEvent_t s_i, e_i, s_mc_h2d, e_mc_h2d, s_mc_d2h, e_mc_d2h, s_pl, e_pl; float t_i, t_mc_h2d, t_mc_d2h, t_pl; cudaEventCreate(&s_i); cudaEventCreate(&s_mc_h2d); cudaEventCreate(&e_i); cudaEventCreate(&e_mc_h2d); cudaEventCreate(&s_mc_d2h); cudaEventCreate(&s_pl); 
cudaEventCreate(&e_mc_d2h); cudaEventCreate(&e_pl); /* float* netH; float* pH; float* netD; float* pD; */ float *netH, *pH, *netD, *pD; cudaEventRecord(s_i,0); netH = (float*)malloc(sizeof(float)*sizeGrid); pH = (float*)malloc(sizeof(float)*sizePar); //intialising particles. for( i = 0; i < sizePar; i++) pH[i]= ((float)rand()/(float)(RAND_MAX) * (float)(max-1)); // printf("particle initialised \n "); for(i=0;i< grid;i++) for(j=0;j< grid;j++) netH[grid*i+j]=0.0; // printf("Grid initialised \n "); cudaEventRecord( e_i,0 ); cudaEventSynchronize( e_i ); cudaEventElapsedTime( &t_i, s_i, e_i); // Allocating GPU memory cudaEventRecord(s_mc_h2d,0); cudaMalloc( (void **)&netD, sizeof(float)*sizeGrid); cudaMalloc( (void **)&pD, sizeof(float)*sizePar); // printf("Cuda memory allocated \n "); //transfering data to gpu cudaMemcpy( pD, pH, sizePar*(sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(netD, netH, sizeGrid*(sizeof(float)), cudaMemcpyHostToDevice); cudaEventRecord( e_mc_h2d,0 ); cudaEventSynchronize( e_mc_h2d ); cudaEventElapsedTime( &t_mc_h2d, s_mc_h2d, e_mc_h2d); // printf("Data cpy to gpu \n \n "); //initialising the thread in groups cudaEventRecord( s_pl,0 ); dim3 dimBlock(192); dim3 dimGrid((par/192)); // printf("Thread launched \n \n "); //@@ Launch the GPU Kernel here parMap<<<dimGrid, dimBlock>>>(pD, netD, grid); // printf("Data back to CPU \n \n "); cudaEventRecord( e_pl,0 ); cudaEventSynchronize( e_pl ); cudaEventElapsedTime( &t_pl, s_pl, e_pl); // Copy the results in GPU memory back to the CPU cudaEventRecord( s_mc_d2h,0 ); cudaMemcpy(netH, netD, sizeof(float)*sizeGrid, cudaMemcpyDeviceToHost); cudaEventRecord( e_mc_d2h,0 ); cudaEventSynchronize( e_mc_d2h ); cudaEventElapsedTime( &t_mc_d2h, s_mc_d2h, e_mc_d2h); //!! if(x<0) stop print i //!! 
denominator -- nan FILE *f = fopen("file.txt", "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } //float temp1=par/(sizeGrid); for ( i = 0; i < sizeGrid; ++i) { //cout<<netH[i]<<" "; fprintf (f,"%f ",((netH[i]))) ;// /temp1)); if (i%grid==(grid-1)) { //printf("\n"); fprintf (f," \n" ); } } fclose(f); // cout<<"Grid size: "<<grid<<"x"<<grid<<" particles:"<<par <<"\n"; // cout<<"Initialisation time: "<<t_i<<"\n"; // cout<<"Memory copy H 2 d: "<<t_mc_h2d<<"\n"; // cout<<"Memory copy D 2 H: "<<t_mc_d2h<<"\n"; // cout<<"Processing time: "<<t_pl<<"\n"; // cout<<"Total time: "<<( t_mc_h2d + t_mc_d2h + t_pl )<<"\n"; printf("\nGrid size: \t\t%d \t %d\n", grid,par); printf("\nInitialisation time:\t%f \n", t_i); printf("\nMemory Copy H 2 D:\t%f \n", t_mc_h2d); printf("\nMemory Copy D 2 H:\t%f \n", t_mc_d2h); printf("\nProcessing time:\t%f \n", t_pl); //event destroy cudaEventDestroy(s_i); cudaEventDestroy(s_mc_h2d); cudaEventDestroy(s_mc_d2h); cudaEventDestroy(e_i); cudaEventDestroy(e_mc_h2d); cudaEventDestroy(e_mc_d2h); cudaEventDestroy(s_pl); cudaEventDestroy(e_pl); // Free the GPU memory cudaFree(netD); cudaFree(pD); free(netH); free(pH); return 0; }
b136fd6a5064c1a884613b0e879bd71200769db7.hip
// !!! This is a file automatically generated by hipify!!! #include <nvgraph.h> #include <stdio.h> #include <sys/time.h> #include <time.h> void check(nvgraphStatus_t status) { if (status != NVGRAPH_STATUS_SUCCESS) { printf("ERROR : %d\n",status); exit(0); } } int main(int argc, char *argv[]) { /*Check Errors*/ if(argc != 7){ printf("Arguments should be in following order:\n"); printf("<# Vertices> <# Edges> <Weights txt File> <Offset txt File> <Indices txt File> <Save Name>\n"); return 1; } const size_t vertex_numsets = 1, edge_numsets = 1; size_t n, nnz; int i; float *sssp_1_h; void** vertex_dim; /*Declare time based variables*/ struct timeval tv1, tv2; struct timezone tz; long int total_time = 0, timing[100]; /*Assign Variables*/ n = atoi(argv[1]); nnz = atoi(argv[2]); /*Open Files*/ FILE *weights, *offsets, *indices, *results; weights = fopen(argv[3], "r"); offsets = fopen(argv[4], "r"); indices = fopen(argv[5], "r"); float weights_h[nnz]; float destination_offsets[n+1]; int destination_offsets_h[n+1]; float source_indices[nnz]; int source_indices_h[nnz]; for (i = 0; i < nnz; i++) { /*Assign Weights*/ fscanf(weights, "%e", &weights_h[i]); /*Assign Indices*/ fscanf(indices, "%e", &source_indices[i]); source_indices_h[i] = source_indices[i]; } for (i = 0; i < n; i++) { /*Assign Indices*/ fscanf(offsets, "%e", &destination_offsets[i]); destination_offsets_h[i] = destination_offsets[i]; } fclose(weights); fclose(indices); fclose(offsets); destination_offsets_h[n] = nnz; // nvgraph variables nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; hipDataType edge_dimT = HIP_R_32F; hipDataType* vertex_dimT; // Init host data sssp_1_h = (float*)malloc(n*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (hipDataType*)malloc(vertex_numsets*sizeof(hipDataType)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); vertex_dim[0]= (void*)sssp_1_h; 
vertex_dimT[0] = HIP_R_32F; check(nvgraphCreate(&handle)); check(nvgraphCreateGraphDescr (handle, &graph)); CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // Set graph connectivity and properties (tranfers) check(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); check(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0)); /*Measure timing for 100 different source_vert*/ int source_vert_offset = n/100; int source_vert = 0; for (int i = 0; i < 100; i++){ gettimeofday(&tv1,&tz); // Get starting time // Solve check(nvgraphSssp(handle, graph, 0, &source_vert, 0)); // Get and print result check(nvgraphGetVertexData(handle, graph, (void*)sssp_1_h, 0)); gettimeofday(&tv2,&tz); // Get ending time source_vert = source_vert + source_vert_offset; /*Calculate time taken in microseconds*/ timing[i] = (tv2.tv_sec-tv1.tv_sec)*1000000 + (tv2.tv_usec-tv1.tv_usec); } /*Write the Shortest Path to a file*/ results = fopen(argv[6], "w+"); for (int i = 0; i < 100; i++){ fprintf(results, "%ld\n", timing[i]); total_time = total_time + timing[i]; } printf("Average Time: %ld\n", total_time/100); fclose(results); //Clean free(sssp_1_h); free(vertex_dim); free(vertex_dimT); free(CSC_input); check(nvgraphDestroyGraphDescr(handle, graph)); check(nvgraphDestroy(handle)); return 0; }
b136fd6a5064c1a884613b0e879bd71200769db7.cu
#include <nvgraph.h> #include <stdio.h> #include <sys/time.h> #include <time.h> void check(nvgraphStatus_t status) { if (status != NVGRAPH_STATUS_SUCCESS) { printf("ERROR : %d\n",status); exit(0); } } int main(int argc, char *argv[]) { /*Check Errors*/ if(argc != 7){ printf("Arguments should be in following order:\n"); printf("<# Vertices> <# Edges> <Weights txt File> <Offset txt File> <Indices txt File> <Save Name>\n"); return 1; } const size_t vertex_numsets = 1, edge_numsets = 1; size_t n, nnz; int i; float *sssp_1_h; void** vertex_dim; /*Declare time based variables*/ struct timeval tv1, tv2; struct timezone tz; long int total_time = 0, timing[100]; /*Assign Variables*/ n = atoi(argv[1]); nnz = atoi(argv[2]); /*Open Files*/ FILE *weights, *offsets, *indices, *results; weights = fopen(argv[3], "r"); offsets = fopen(argv[4], "r"); indices = fopen(argv[5], "r"); float weights_h[nnz]; float destination_offsets[n+1]; int destination_offsets_h[n+1]; float source_indices[nnz]; int source_indices_h[nnz]; for (i = 0; i < nnz; i++) { /*Assign Weights*/ fscanf(weights, "%e", &weights_h[i]); /*Assign Indices*/ fscanf(indices, "%e", &source_indices[i]); source_indices_h[i] = source_indices[i]; } for (i = 0; i < n; i++) { /*Assign Indices*/ fscanf(offsets, "%e", &destination_offsets[i]); destination_offsets_h[i] = destination_offsets[i]; } fclose(weights); fclose(indices); fclose(offsets); destination_offsets_h[n] = nnz; // nvgraph variables nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; cudaDataType_t edge_dimT = CUDA_R_32F; cudaDataType_t* vertex_dimT; // Init host data sssp_1_h = (float*)malloc(n*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (cudaDataType_t*)malloc(vertex_numsets*sizeof(cudaDataType_t)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); vertex_dim[0]= (void*)sssp_1_h; vertex_dimT[0] = CUDA_R_32F; 
check(nvgraphCreate(&handle)); check(nvgraphCreateGraphDescr (handle, &graph)); CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // Set graph connectivity and properties (tranfers) check(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); check(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0)); /*Measure timing for 100 different source_vert*/ int source_vert_offset = n/100; int source_vert = 0; for (int i = 0; i < 100; i++){ gettimeofday(&tv1,&tz); // Get starting time // Solve check(nvgraphSssp(handle, graph, 0, &source_vert, 0)); // Get and print result check(nvgraphGetVertexData(handle, graph, (void*)sssp_1_h, 0)); gettimeofday(&tv2,&tz); // Get ending time source_vert = source_vert + source_vert_offset; /*Calculate time taken in microseconds*/ timing[i] = (tv2.tv_sec-tv1.tv_sec)*1000000 + (tv2.tv_usec-tv1.tv_usec); } /*Write the Shortest Path to a file*/ results = fopen(argv[6], "w+"); for (int i = 0; i < 100; i++){ fprintf(results, "%ld\n", timing[i]); total_time = total_time + timing[i]; } printf("Average Time: %ld\n", total_time/100); fclose(results); //Clean free(sssp_1_h); free(vertex_dim); free(vertex_dimT); free(CSC_input); check(nvgraphDestroyGraphDescr(handle, graph)); check(nvgraphDestroy(handle)); return 0; }
4e0ef0744d06b463379b835413169f01d97f166d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdlib> #include<pthread.h> #include<semaphore.h> #include<time.h> #include "ant.h" #include "map3d.h" #define MAX_STEP 10000 #define THREAD_COUNT 4 #define TOTAL_ANTS 60 #define MAX_HORM_LEFT 100 #define MAP_X 50 #define MAP_Y 50 #define MAP_Z 3 #define HOME_X 11 #define HOME_Y 11 #define HOME_Z 1 #define FOOD_X 39 #define FOOD_Y 39 #define FOOD_Z 1 #define BLOCK_SIZE 512 using namespace std; int tt=0, ff=0, fh=0; int counter1,counter2; sem_t update_barrier; sem_t barrier1,barrier2; sem_t mutex1,mutex2; ant ants[TOTAL_ANTS]; map3d mmap; double *Md1, *Md2, blue_horm[MAP_X*MAP_Y*MAP_Z], red_horm[MAP_X*MAP_Y*MAP_Z]; int total_block, tpoints; __global__ void horm_update(double *Md, double decline) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < MAP_X*MAP_Y*MAP_Z) Md[index] = Md[index]*decline; } int matrix_3d_to_1d(int x,int y,int z) { return z * MAP_X * MAP_Y + y * MAP_X + x; } void layout(const char *file_name, double *horm_array) { FILE *fp; fp = fopen(file_name,"a+"); if(fp!=NULL) { fprintf(fp, "\n%d*%d*%d\n",MAP_X, MAP_Y, MAP_Z); for(int i=0;i<MAP_Z;i++) { for(int j=0;j<MAP_Y;j++) { for(int k=0;k<MAP_X;k++) { fprintf(fp, "%f",horm_array[matrix_3d_to_1d(k,j,i)]); if(k!=MAP_X-1) fprintf(fp, ","); else fprintf(fp, "\n"); } } } } } void* run_ants(void* data) { double horm0, horm1, horm2, horm3, horm4, horm5, horm6, horm7, horm8, horm9; unsigned int i, j, pos_x, pos_y, pos_z; unsigned int thread_id = *(unsigned int*) data; unsigned int istart = thread_id * TOTAL_ANTS / THREAD_COUNT; unsigned int iend = (thread_id + 1) * TOTAL_ANTS / THREAD_COUNT; double left_horm; if (thread_id == THREAD_COUNT - 1) iend = TOTAL_ANTS; while(true){ // cpu: update horm map if(thread_id == 0) { for(j = 0; j < TOTAL_ANTS; j++) { pos_x = ants[j].get_x(); pos_y = ants[j].get_y(); pos_z = ants[j].get_z(); left_horm = ants[j].get_horm(); if(ants[j].get_state() == 
0/*blue*/) blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; else if(ants[j].get_state() == 1/*red*/) red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; else if(ants[j].get_state() == -1/*init blue*/) blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; } // release barrier for(j = 0; j < THREAD_COUNT - 1; j++){ sem_post(&update_barrier); } } else{ sem_wait(&update_barrier); } // cpu: get & compute horm & decide direction for(int i=istart; i<iend; i++) { pos_x = ants[i].get_x(); pos_y = ants[i].get_y(); pos_z = ants[i].get_z(); if(ants[i].get_state() == 1){ // the ant is in red horm horm0 = blue_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = blue_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = blue_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = blue_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } else if (ants[i].get_state() == 0){ // the ant is in blue horm horm0 = red_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = red_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = red_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = red_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } else if (ants[i].get_state() == -1){ // the ant is in blue horm horm0 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]+red_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = -blue_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]+red_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = -blue_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]+red_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]+red_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]+red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = 
-blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]+red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } ants[i].set_sight(horm0, horm1, horm2, horm3, horm4, horm5); ants[i].decide_direction(mmap); } // barrier sem_wait(&mutex1); ++counter1; if (counter1 == THREAD_COUNT) { counter1 = 0; for(j = 0; j < THREAD_COUNT; j++) sem_post(&barrier1); } sem_post(&mutex1); sem_wait(&barrier1); // cpu: decide direction, gpu: decline horm if(thread_id == 0) { hipMemcpy(blue_horm, Md1, tpoints*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(red_horm, Md2, tpoints*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( horm_update), dim3(total_block), dim3(BLOCK_SIZE), 0, 0, Md1, 0.99); hipLaunchKernelGGL(( horm_update), dim3(total_block), dim3(BLOCK_SIZE), 0, 0, Md2, 0.99); hipMemcpy(red_horm, Md2, tpoints*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(blue_horm, Md1, tpoints*sizeof(double), hipMemcpyDeviceToHost); } // barrier sem_wait(&mutex2); ++counter2; if (counter2 == THREAD_COUNT) { counter2 = 0; if (++tt%100 == 0) { ff = 0; fh = 0; for(j = 0; j < TOTAL_ANTS; j++) { if(ants[j].get_state()==1) ++ff; else if(ants[j].get_state()==0) ++fh; } cout << tt << "\t" << TOTAL_ANTS-ff-fh << " : " << ff << " : " << fh <<endl; if(tt>=MAX_STEP) { remove( "blue.txt" ); layout("blue.txt", blue_horm); remove( "red.txt" ); layout("red.txt", red_horm); exit(0); } } for(j = 0; j < THREAD_COUNT; j++) sem_post(&barrier2); } sem_post(&mutex2); sem_wait(&barrier2); } } void init() { counter1 = 0; counter2 = 0; sem_init(&update_barrier, 0, 0); sem_init(&mutex1, 0, 1); sem_init(&barrier1, 0, 0); sem_init(&mutex2, 0, 1); sem_init(&barrier2, 0, 0); tpoints = MAP_X*MAP_Y*MAP_Z; hipMalloc((void**) &Md1, tpoints*sizeof(double)); hipMalloc((void**) &Md2, tpoints*sizeof(double)); total_block = ((tpoints % BLOCK_SIZE) == 0)? 
(tpoints/BLOCK_SIZE) : (tpoints/BLOCK_SIZE + 1); mmap.load_sample(MAP_X,MAP_Y,MAP_Z); //set home and food point mmap.edit(HOME_X,HOME_Y,HOME_Z,101); mmap.edit(FOOD_X,FOOD_Y,FOOD_Z,100); //initial ants srand (time(NULL)); int offset = rand()/2; unsigned int set_seed; for(int i=0; i<TOTAL_ANTS; ++i) { ants[i].set_position(HOME_X,HOME_Y,HOME_Z); ants[i].set_home_xyz(HOME_X,HOME_Y,HOME_Z); ants[i].set_horm(MAX_HORM_LEFT); ants[i].set_max_horm(MAX_HORM_LEFT); ants[i].set_state(-1); set_seed = i+offset; ants[i].set_seed(set_seed); ants[i].ini_prefer_direction(); } for(int i=0;i<tpoints;++i) { blue_horm[i] = 0; red_horm[i] = 0; } } int main(int argc, char* argv[]) { init(); unsigned int thread_id[THREAD_COUNT]; unsigned int thread; pthread_t* thread_handles; thread_handles = (pthread_t*) malloc(THREAD_COUNT*sizeof(pthread_t)); for(thread = 0; thread < THREAD_COUNT; thread++){ thread_id[thread] = thread; pthread_create(&thread_handles[thread], NULL, run_ants,(void*) &thread_id[thread]); } char key = 'w'; while(key != 'x') { cin >> key; } return 0; }
4e0ef0744d06b463379b835413169f01d97f166d.cu
#include<iostream> #include<cstdlib> #include<pthread.h> #include<semaphore.h> #include<time.h> #include "ant.h" #include "map3d.h" #define MAX_STEP 10000 #define THREAD_COUNT 4 #define TOTAL_ANTS 60 #define MAX_HORM_LEFT 100 #define MAP_X 50 #define MAP_Y 50 #define MAP_Z 3 #define HOME_X 11 #define HOME_Y 11 #define HOME_Z 1 #define FOOD_X 39 #define FOOD_Y 39 #define FOOD_Z 1 #define BLOCK_SIZE 512 using namespace std; int tt=0, ff=0, fh=0; int counter1,counter2; sem_t update_barrier; sem_t barrier1,barrier2; sem_t mutex1,mutex2; ant ants[TOTAL_ANTS]; map3d mmap; double *Md1, *Md2, blue_horm[MAP_X*MAP_Y*MAP_Z], red_horm[MAP_X*MAP_Y*MAP_Z]; int total_block, tpoints; __global__ void horm_update(double *Md, double decline) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < MAP_X*MAP_Y*MAP_Z) Md[index] = Md[index]*decline; } int matrix_3d_to_1d(int x,int y,int z) { return z * MAP_X * MAP_Y + y * MAP_X + x; } void layout(const char *file_name, double *horm_array) { FILE *fp; fp = fopen(file_name,"a+"); if(fp!=NULL) { fprintf(fp, "\n%d*%d*%d\n",MAP_X, MAP_Y, MAP_Z); for(int i=0;i<MAP_Z;i++) { for(int j=0;j<MAP_Y;j++) { for(int k=0;k<MAP_X;k++) { fprintf(fp, "%f",horm_array[matrix_3d_to_1d(k,j,i)]); if(k!=MAP_X-1) fprintf(fp, ","); else fprintf(fp, "\n"); } } } } } void* run_ants(void* data) { double horm0, horm1, horm2, horm3, horm4, horm5, horm6, horm7, horm8, horm9; unsigned int i, j, pos_x, pos_y, pos_z; unsigned int thread_id = *(unsigned int*) data; unsigned int istart = thread_id * TOTAL_ANTS / THREAD_COUNT; unsigned int iend = (thread_id + 1) * TOTAL_ANTS / THREAD_COUNT; double left_horm; if (thread_id == THREAD_COUNT - 1) iend = TOTAL_ANTS; while(true){ // cpu: update horm map if(thread_id == 0) { for(j = 0; j < TOTAL_ANTS; j++) { pos_x = ants[j].get_x(); pos_y = ants[j].get_y(); pos_z = ants[j].get_z(); left_horm = ants[j].get_horm(); if(ants[j].get_state() == 0/*blue*/) blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; else 
if(ants[j].get_state() == 1/*red*/) red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; else if(ants[j].get_state() == -1/*init blue*/) blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z)] += left_horm; } // release barrier for(j = 0; j < THREAD_COUNT - 1; j++){ sem_post(&update_barrier); } } else{ sem_wait(&update_barrier); } // cpu: get & compute horm & decide direction for(int i=istart; i<iend; i++) { pos_x = ants[i].get_x(); pos_y = ants[i].get_y(); pos_z = ants[i].get_z(); if(ants[i].get_state() == 1){ // the ant is in red horm horm0 = blue_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = blue_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = blue_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = blue_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } else if (ants[i].get_state() == 0){ // the ant is in blue horm horm0 = red_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = red_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = red_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = red_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } else if (ants[i].get_state() == -1){ // the ant is in blue horm horm0 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]+red_horm[matrix_3d_to_1d(pos_x,pos_y+1,pos_z)]; horm1 = -blue_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]+red_horm[matrix_3d_to_1d(pos_x-1,pos_y,pos_z)]; horm2 = -blue_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]+red_horm[matrix_3d_to_1d(pos_x+1,pos_y,pos_z)]; horm3 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]+red_horm[matrix_3d_to_1d(pos_x,pos_y-1,pos_z)]; horm4 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]+red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z+1)]; horm5 = -blue_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]+red_horm[matrix_3d_to_1d(pos_x,pos_y,pos_z-1)]; } 
ants[i].set_sight(horm0, horm1, horm2, horm3, horm4, horm5); ants[i].decide_direction(mmap); } // barrier sem_wait(&mutex1); ++counter1; if (counter1 == THREAD_COUNT) { counter1 = 0; for(j = 0; j < THREAD_COUNT; j++) sem_post(&barrier1); } sem_post(&mutex1); sem_wait(&barrier1); // cpu: decide direction, gpu: decline horm if(thread_id == 0) { cudaMemcpy(blue_horm, Md1, tpoints*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(red_horm, Md2, tpoints*sizeof(double), cudaMemcpyHostToDevice); horm_update<<<total_block, BLOCK_SIZE>>>(Md1, 0.99); horm_update<<<total_block, BLOCK_SIZE>>>(Md2, 0.99); cudaMemcpy(red_horm, Md2, tpoints*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(blue_horm, Md1, tpoints*sizeof(double), cudaMemcpyDeviceToHost); } // barrier sem_wait(&mutex2); ++counter2; if (counter2 == THREAD_COUNT) { counter2 = 0; if (++tt%100 == 0) { ff = 0; fh = 0; for(j = 0; j < TOTAL_ANTS; j++) { if(ants[j].get_state()==1) ++ff; else if(ants[j].get_state()==0) ++fh; } cout << tt << "\t" << TOTAL_ANTS-ff-fh << " : " << ff << " : " << fh <<endl; if(tt>=MAX_STEP) { remove( "blue.txt" ); layout("blue.txt", blue_horm); remove( "red.txt" ); layout("red.txt", red_horm); exit(0); } } for(j = 0; j < THREAD_COUNT; j++) sem_post(&barrier2); } sem_post(&mutex2); sem_wait(&barrier2); } } void init() { counter1 = 0; counter2 = 0; sem_init(&update_barrier, 0, 0); sem_init(&mutex1, 0, 1); sem_init(&barrier1, 0, 0); sem_init(&mutex2, 0, 1); sem_init(&barrier2, 0, 0); tpoints = MAP_X*MAP_Y*MAP_Z; cudaMalloc((void**) &Md1, tpoints*sizeof(double)); cudaMalloc((void**) &Md2, tpoints*sizeof(double)); total_block = ((tpoints % BLOCK_SIZE) == 0)? 
(tpoints/BLOCK_SIZE) : (tpoints/BLOCK_SIZE + 1); mmap.load_sample(MAP_X,MAP_Y,MAP_Z); //set home and food point mmap.edit(HOME_X,HOME_Y,HOME_Z,101); mmap.edit(FOOD_X,FOOD_Y,FOOD_Z,100); //initial ants srand (time(NULL)); int offset = rand()/2; unsigned int set_seed; for(int i=0; i<TOTAL_ANTS; ++i) { ants[i].set_position(HOME_X,HOME_Y,HOME_Z); ants[i].set_home_xyz(HOME_X,HOME_Y,HOME_Z); ants[i].set_horm(MAX_HORM_LEFT); ants[i].set_max_horm(MAX_HORM_LEFT); ants[i].set_state(-1); set_seed = i+offset; ants[i].set_seed(set_seed); ants[i].ini_prefer_direction(); } for(int i=0;i<tpoints;++i) { blue_horm[i] = 0; red_horm[i] = 0; } } int main(int argc, char* argv[]) { init(); unsigned int thread_id[THREAD_COUNT]; unsigned int thread; pthread_t* thread_handles; thread_handles = (pthread_t*) malloc(THREAD_COUNT*sizeof(pthread_t)); for(thread = 0; thread < THREAD_COUNT; thread++){ thread_id[thread] = thread; pthread_create(&thread_handles[thread], NULL, run_ants,(void*) &thread_id[thread]); } char key = 'w'; while(key != 'x') { cin >> key; } return 0; }
26c672356084837de628b9b802e66f59539dcf22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include "kblas.h" #include "kblas_struct.h" #include "kblas_gpu_util.ch" #include "batch_rand.h" //------------------------------------------------------------------------------ // Random state structure and routines struct KBlasRandState { hiprandState_t* states; size_t num_states; }; __global__ void init_random_states(unsigned int seed, hiprandState_t* states, size_t num_states) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id > num_states) return; hiprand_init(seed, thread_id, 0, &states[thread_id]); } int kblasInitRandState(kblasHandle_t handle, kblasRandState_t* state, int num_states, unsigned int seed) { *state = new KBlasRandState(); (*state)->num_states = num_states; check_error_ret( hipMalloc((void**)&((*state)->states), (*state)->num_states * sizeof(hiprandState_t)), KBLAS_Error_Allocation ); int block_threads = 256; int blocks = iDivUp(num_states, block_threads); dim3 dimBlock(block_threads, 1); dim3 dimGrid(blocks, 1, 1); hipLaunchKernelGGL(( init_random_states), dim3(dimGrid), dim3(dimBlock), 0, handle->stream , seed, (*state)->states, (*state)->num_states); check_error_ret( hipGetLastError(), KBLAS_UnknownError ); return KBLAS_Success; } int kblasDestroyRandState(kblasRandState_t state) { if(state && state->states) { check_error_ret( hipFree(state->states), KBLAS_Error_Deallocation ); delete state; } return KBLAS_Success; } //------------------------------------------------------------------------------ template<class T> __device__ __forceinline__ T tcurand_normal(hiprandState_t* state); template<> __device__ __forceinline__ float tcurand_normal<float>(hiprandState_t* state) { return hiprand_normal(state); } template<> __device__ __forceinline__ double tcurand_normal<double>(hiprandState_t* state) { return hiprand_normal_double(state); } 
template<class T, class T_ptr> __global__ void batch_rand_kernel2( int rows, int cols, T_ptr A_batch, int lda, int stride_a, hiprandState_t* states, int num_ops, int padded_rows, int elements_per_thread ) { int thread_index = blockIdx.x * blockDim.x + threadIdx.x; // Cache the state in local memory hiprandState_t state = states[thread_index]; int curr_op = -1; T* A_block = NULL; for(int e = 0; e < elements_per_thread; e++) { int global_linear_index = e * blockDim.x * gridDim.x + thread_index; int op_index = global_linear_index / (padded_rows * cols); if(op_index >= num_ops) break; if(op_index != curr_op) { curr_op = op_index; A_block = getOperationPtr<T>(A_batch, op_index, stride_a); } int matrix_linear_index = global_linear_index % (padded_rows * cols); int row_index = matrix_linear_index % padded_rows; int col_index = matrix_linear_index / padded_rows; if(row_index < rows && col_index < cols) A_block[row_index + col_index * lda] = tcurand_normal<T>(&state); } // Flush back to global memory states[thread_index] = state; } template<class T, class T_ptr> __global__ void batch_rand_kernel( int rows, int cols, T_ptr A_batch, int lda, int stride_a, hiprandState_t* states, int num_ops, int thread_block_rows ) { int row_start_index = blockIdx.x * blockDim.x + threadIdx.x; int state_index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; int row_increment = blockDim.x * gridDim.x; // Cache the state in local memory hiprandState_t state = states[state_index]; for(int op = blockIdx.y; op < num_ops; op += gridDim.y) { T* A_block = getOperationPtr<T>(A_batch, op, stride_a); for(int j = 0; j < cols; j++) { for(int b = 0; b < thread_block_rows ; b++) { int row_index = row_start_index + b * row_increment; if(row_index < rows) A_block[row_index + j * lda] = tcurand_normal<T>(&state); } } } // Flush back to global memory states[state_index] = state; } template<class T, class T_ptr> int batch_rand_template2(kblasHandle_t handle, int rows, int cols, T_ptr A_batch, 
int lda, int stride_a, kblasRandState_t state, int num_ops) { int block_x = 128; int grid_x = state->num_states / block_x; int padded_rows = iDivUp(rows, block_x) * block_x; int total_entries = padded_rows * cols * num_ops; int elements_per_thread = iDivUp(total_entries, block_x * grid_x); dim3 dimBlock(block_x, 1, 1); dim3 dimGrid(grid_x, 1, 1); hipLaunchKernelGGL(( batch_rand_kernel<T, T_ptr>), dim3(dimGrid), dim3(dimBlock), 0, handle->stream , rows, cols, A_batch, lda, stride_a, state->states, num_ops, padded_rows, elements_per_thread ); return KBLAS_Success; } template<class T, class T_ptr> int batch_rand_template(kblasHandle_t handle, int rows, int cols, T_ptr A_batch, int lda, int stride_a, kblasRandState_t state, int num_ops) { int block_x = 64; int grouped_states = state->num_states / block_x; int block_rows = iDivUp(rows, block_x); int grid_x = kmin(grouped_states, block_rows); int grid_y = grouped_states / grid_x; int thread_block_rows = iDivUp(block_rows, grid_x); dim3 dimBlock(block_x, 1, 1); dim3 dimGrid(grid_x, grid_y, 1); hipLaunchKernelGGL(( batch_rand_kernel<T, T_ptr>), dim3(dimGrid), dim3(dimBlock), 0, handle->stream , rows, cols, A_batch, lda, stride_a, state->states, num_ops, thread_block_rows ); return KBLAS_Success; } //------------------------------------------------------------------------------ // Array of pointers interface int kblasDrand_batch(kblasHandle_t handle, int m, int n, double** A_ptrs, int lda, kblasRandState_t state, int num_ops) { return batch_rand_template<double, double**>(handle, m, n, A_ptrs, lda, 0, state, num_ops); } int kblasSrand_batch(kblasHandle_t handle, int m, int n, float** A_ptrs, int lda, kblasRandState_t state, int num_ops) { return batch_rand_template<float, float**>(handle, m, n, A_ptrs, lda, 0, state, num_ops); } //------------------------------------------------------------------------------ // Strided interface int kblasDrand_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int 
stride_a, kblasRandState_t state, int num_ops) { return batch_rand_template<double, double*>(handle, m, n, A_strided, lda, stride_a, state, num_ops); } int kblasSrand_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, kblasRandState_t state, int num_ops) { return batch_rand_template<float, float*>(handle, m, n, A_strided, lda, stride_a, state, num_ops); }
26c672356084837de628b9b802e66f59539dcf22.cu
#include <cublas_v2.h> #include <curand.h> #include <curand_kernel.h> #include <stdio.h> #include "kblas.h" #include "kblas_struct.h" #include "kblas_gpu_util.ch" #include "batch_rand.h" //------------------------------------------------------------------------------ // Random state structure and routines struct KBlasRandState { curandState_t* states; size_t num_states; }; __global__ void init_random_states(unsigned int seed, curandState_t* states, size_t num_states) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id > num_states) return; curand_init(seed, thread_id, 0, &states[thread_id]); } int kblasInitRandState(kblasHandle_t handle, kblasRandState_t* state, int num_states, unsigned int seed) { *state = new KBlasRandState(); (*state)->num_states = num_states; check_error_ret( cudaMalloc((void**)&((*state)->states), (*state)->num_states * sizeof(curandState_t)), KBLAS_Error_Allocation ); int block_threads = 256; int blocks = iDivUp(num_states, block_threads); dim3 dimBlock(block_threads, 1); dim3 dimGrid(blocks, 1, 1); init_random_states<<< dimGrid, dimBlock, 0, handle->stream >>>(seed, (*state)->states, (*state)->num_states); check_error_ret( cudaGetLastError(), KBLAS_UnknownError ); return KBLAS_Success; } int kblasDestroyRandState(kblasRandState_t state) { if(state && state->states) { check_error_ret( cudaFree(state->states), KBLAS_Error_Deallocation ); delete state; } return KBLAS_Success; } //------------------------------------------------------------------------------ template<class T> __device__ __forceinline__ T tcurand_normal(curandState* state); template<> __device__ __forceinline__ float tcurand_normal<float>(curandState* state) { return curand_normal(state); } template<> __device__ __forceinline__ double tcurand_normal<double>(curandState* state) { return curand_normal_double(state); } template<class T, class T_ptr> __global__ void batch_rand_kernel2( int rows, int cols, T_ptr A_batch, int lda, int stride_a, curandState_t* states, 
int num_ops, int padded_rows, int elements_per_thread ) { int thread_index = blockIdx.x * blockDim.x + threadIdx.x; // Cache the state in local memory curandState state = states[thread_index]; int curr_op = -1; T* A_block = NULL; for(int e = 0; e < elements_per_thread; e++) { int global_linear_index = e * blockDim.x * gridDim.x + thread_index; int op_index = global_linear_index / (padded_rows * cols); if(op_index >= num_ops) break; if(op_index != curr_op) { curr_op = op_index; A_block = getOperationPtr<T>(A_batch, op_index, stride_a); } int matrix_linear_index = global_linear_index % (padded_rows * cols); int row_index = matrix_linear_index % padded_rows; int col_index = matrix_linear_index / padded_rows; if(row_index < rows && col_index < cols) A_block[row_index + col_index * lda] = tcurand_normal<T>(&state); } // Flush back to global memory states[thread_index] = state; } template<class T, class T_ptr> __global__ void batch_rand_kernel( int rows, int cols, T_ptr A_batch, int lda, int stride_a, curandState_t* states, int num_ops, int thread_block_rows ) { int row_start_index = blockIdx.x * blockDim.x + threadIdx.x; int state_index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; int row_increment = blockDim.x * gridDim.x; // Cache the state in local memory curandState state = states[state_index]; for(int op = blockIdx.y; op < num_ops; op += gridDim.y) { T* A_block = getOperationPtr<T>(A_batch, op, stride_a); for(int j = 0; j < cols; j++) { for(int b = 0; b < thread_block_rows ; b++) { int row_index = row_start_index + b * row_increment; if(row_index < rows) A_block[row_index + j * lda] = tcurand_normal<T>(&state); } } } // Flush back to global memory states[state_index] = state; } template<class T, class T_ptr> int batch_rand_template2(kblasHandle_t handle, int rows, int cols, T_ptr A_batch, int lda, int stride_a, kblasRandState_t state, int num_ops) { int block_x = 128; int grid_x = state->num_states / block_x; int padded_rows = iDivUp(rows, 
block_x) * block_x; int total_entries = padded_rows * cols * num_ops; int elements_per_thread = iDivUp(total_entries, block_x * grid_x); dim3 dimBlock(block_x, 1, 1); dim3 dimGrid(grid_x, 1, 1); batch_rand_kernel<T, T_ptr><<< dimGrid, dimBlock, 0, handle->stream >>>( rows, cols, A_batch, lda, stride_a, state->states, num_ops, padded_rows, elements_per_thread ); return KBLAS_Success; } template<class T, class T_ptr> int batch_rand_template(kblasHandle_t handle, int rows, int cols, T_ptr A_batch, int lda, int stride_a, kblasRandState_t state, int num_ops) { int block_x = 64; int grouped_states = state->num_states / block_x; int block_rows = iDivUp(rows, block_x); int grid_x = kmin(grouped_states, block_rows); int grid_y = grouped_states / grid_x; int thread_block_rows = iDivUp(block_rows, grid_x); dim3 dimBlock(block_x, 1, 1); dim3 dimGrid(grid_x, grid_y, 1); batch_rand_kernel<T, T_ptr><<< dimGrid, dimBlock, 0, handle->stream >>>( rows, cols, A_batch, lda, stride_a, state->states, num_ops, thread_block_rows ); return KBLAS_Success; } //------------------------------------------------------------------------------ // Array of pointers interface int kblasDrand_batch(kblasHandle_t handle, int m, int n, double** A_ptrs, int lda, kblasRandState_t state, int num_ops) { return batch_rand_template<double, double**>(handle, m, n, A_ptrs, lda, 0, state, num_ops); } int kblasSrand_batch(kblasHandle_t handle, int m, int n, float** A_ptrs, int lda, kblasRandState_t state, int num_ops) { return batch_rand_template<float, float**>(handle, m, n, A_ptrs, lda, 0, state, num_ops); } //------------------------------------------------------------------------------ // Strided interface int kblasDrand_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, kblasRandState_t state, int num_ops) { return batch_rand_template<double, double*>(handle, m, n, A_strided, lda, stride_a, state, num_ops); } int kblasSrand_batch_strided(kblasHandle_t handle, int m, 
int n, float* A_strided, int lda, int stride_a, kblasRandState_t state, int num_ops) { return batch_rand_template<float, float*>(handle, m, n, A_strided, lda, stride_a, state, num_ops); }
87c0c0196e7d6e41e19cc920da3ac664c9628c15.hip
// !!! This is a file automatically generated by hipify!!! // Modified from // https://github.com/NVlabs/stylegan3/blob/main/torch_utils/ops/bias_act.cpp // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #include <c10/util/Half.h> #include <hip/hip_runtime.h> #include <torch/types.h> #include "pytorch_cuda_helper.hpp" struct bias_act_kernel_params { const void *x; // [sizeX] const void *b; // [sizeB] or NULL const void *xref; // [sizeX] or NULL const void *yref; // [sizeX] or NULL const void *dy; // [sizeX] or NULL void *y; // [sizeX] int grad; int act; float alpha; float gain; float clamp; int sizeX; int sizeB; int stepB; int loopX; }; // CUDA kernel selection. template <class T> void *choose_bias_act_kernel(const bias_act_kernel_params &p); //------------------------------------------------------------------------ // Helpers. template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; }; template <> struct InternalType<float> { typedef float scalar_t; }; template <> struct InternalType<c10::Half> { typedef float scalar_t; }; //------------------------------------------------------------------------ // CUDA kernel. 
template <class T, int A> __global__ void bias_act_kernel(bias_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; int G = p.grad; scalar_t alpha = (scalar_t)p.alpha; scalar_t gain = (scalar_t)p.gain; scalar_t clamp = (scalar_t)p.clamp; scalar_t one = (scalar_t)1; scalar_t two = (scalar_t)2; scalar_t expRange = (scalar_t)80; scalar_t halfExpRange = (scalar_t)40; scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; // Loop over elements. int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) { // Load. scalar_t x = (scalar_t)((const T *)p.x)[xi]; scalar_t b = (p.b) ? (scalar_t)((const T *)p.b)[(xi / p.stepB) % p.sizeB] : 0; scalar_t xref = (p.xref) ? (scalar_t)((const T *)p.xref)[xi] : 0; scalar_t yref = (p.yref) ? (scalar_t)((const T *)p.yref)[xi] : 0; scalar_t dy = (p.dy) ? (scalar_t)((const T *)p.dy)[xi] : one; scalar_t yy = (gain != 0) ? yref / gain : 0; scalar_t y = 0; // Apply bias. ((G == 0) ? x : xref) += b; // linear if (A == 1) { if (G == 0) y = x; if (G == 1) y = x; } // relu if (A == 2) { if (G == 0) y = (x > 0) ? x : 0; if (G == 1) y = (yy > 0) ? x : 0; } // lrelu if (A == 3) { if (G == 0) y = (x > 0) ? x : x * alpha; if (G == 1) y = (yy > 0) ? x : x * alpha; } // tanh if (A == 4) { if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } if (G == 1) y = x * (one - yy * yy); if (G == 2) y = x * (one - yy * yy) * (-two * yy); } // sigmoid if (A == 5) { if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); if (G == 1) y = x * yy * (one - yy); if (G == 2) y = x * yy * (one - yy) * (one - two * yy); } // elu if (A == 6) { if (G == 0) y = (x >= 0) ? x : exp(x) - one; if (G == 1) y = (yy >= 0) ? x : x * (yy + one); if (G == 2) y = (yy >= 0) ? 
0 : x * (yy + one); } // selu if (A == 7) { if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); } // softplus if (A == 8) { if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); if (G == 1) y = x * (one - exp(-yy)); if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } } // swish if (A == 9) { if (G == 0) y = (x < -expRange) ? 0 : x / (exp(-x) + one); else { scalar_t c = exp(xref); scalar_t d = c + one; if (G == 1) y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); else y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; } } // Apply gain. y *= gain * dy; // Clamp. if (clamp >= 0) { if (G == 0) y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; else y = (yref > -clamp & yref < clamp) ? y : 0; } // Store. ((T *)p.y)[xi] = (T)y; } } //------------------------------------------------------------------------ // CUDA kernel selection. 
template <class T> void *choose_bias_act_kernel(const bias_act_kernel_params &p) { if (p.act == 1) return (void *)bias_act_kernel<T, 1>; if (p.act == 2) return (void *)bias_act_kernel<T, 2>; if (p.act == 3) return (void *)bias_act_kernel<T, 3>; if (p.act == 4) return (void *)bias_act_kernel<T, 4>; if (p.act == 5) return (void *)bias_act_kernel<T, 5>; if (p.act == 6) return (void *)bias_act_kernel<T, 6>; if (p.act == 7) return (void *)bias_act_kernel<T, 7>; if (p.act == 8) return (void *)bias_act_kernel<T, 8>; if (p.act == 9) return (void *)bias_act_kernel<T, 9>; return NULL; } //------------------------------------------------------------------------ static bool has_same_layout(torch::Tensor x, torch::Tensor y) { if (x.dim() != y.dim()) return false; for (int64_t i = 0; i < x.dim(); i++) { if (x.size(i) != y.size(i)) return false; if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) return false; } return true; } //------------------------------------------------------------------------ torch::Tensor bias_act_op(const torch::Tensor &x, const torch::Tensor &b, const torch::Tensor &xref, const torch::Tensor &yref, const torch::Tensor &dy, int grad, int dim, int act, float alpha, float gain, float clamp) { // Validate arguments. 
TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); TORCH_CHECK( b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); TORCH_CHECK( dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); TORCH_CHECK(b.dim() == 1, "b must have rank 1"); TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); TORCH_CHECK(grad >= 0, "grad must be non-negative"); // Validate layout. TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); // Create output tensor. const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(x)); torch::Tensor y = torch::empty_like(x); TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); // Initialize CUDA kernel parameters. bias_act_kernel_params p; p.x = x.data_ptr(); p.b = (b.numel()) ? b.data_ptr() : NULL; p.xref = (xref.numel()) ? xref.data_ptr() : NULL; p.yref = (yref.numel()) ? yref.data_ptr() : NULL; p.dy = (dy.numel()) ? 
dy.data_ptr() : NULL; p.y = y.data_ptr(); p.grad = grad; p.act = act; p.alpha = alpha; p.gain = gain; p.clamp = clamp; p.sizeX = (int)x.numel(); p.sizeB = (int)b.numel(); p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; // Choose CUDA kernel. void *kernel; AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { kernel = choose_bias_act_kernel<scalar_t>(p); }); TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); // Launch CUDA kernel. p.loopX = 4; int blockSize = 4 * 32; int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; void *args[] = {&p}; #ifdef MMCV_WITH_HIP AT_CUDA_CHECK(hipLaunchKernel(kernel, gridSize, blockSize, args, 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); #else AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); #endif return y; }
87c0c0196e7d6e41e19cc920da3ac664c9628c15.cu
// Modified from // https://github.com/NVlabs/stylegan3/blob/main/torch_utils/ops/bias_act.cpp // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #include <c10/util/Half.h> #include <cuda_runtime.h> #include <torch/types.h> #include "pytorch_cuda_helper.hpp" struct bias_act_kernel_params { const void *x; // [sizeX] const void *b; // [sizeB] or NULL const void *xref; // [sizeX] or NULL const void *yref; // [sizeX] or NULL const void *dy; // [sizeX] or NULL void *y; // [sizeX] int grad; int act; float alpha; float gain; float clamp; int sizeX; int sizeB; int stepB; int loopX; }; // CUDA kernel selection. template <class T> void *choose_bias_act_kernel(const bias_act_kernel_params &p); //------------------------------------------------------------------------ // Helpers. template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; }; template <> struct InternalType<float> { typedef float scalar_t; }; template <> struct InternalType<c10::Half> { typedef float scalar_t; }; //------------------------------------------------------------------------ // CUDA kernel. 
template <class T, int A> __global__ void bias_act_kernel(bias_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; int G = p.grad; scalar_t alpha = (scalar_t)p.alpha; scalar_t gain = (scalar_t)p.gain; scalar_t clamp = (scalar_t)p.clamp; scalar_t one = (scalar_t)1; scalar_t two = (scalar_t)2; scalar_t expRange = (scalar_t)80; scalar_t halfExpRange = (scalar_t)40; scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; // Loop over elements. int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) { // Load. scalar_t x = (scalar_t)((const T *)p.x)[xi]; scalar_t b = (p.b) ? (scalar_t)((const T *)p.b)[(xi / p.stepB) % p.sizeB] : 0; scalar_t xref = (p.xref) ? (scalar_t)((const T *)p.xref)[xi] : 0; scalar_t yref = (p.yref) ? (scalar_t)((const T *)p.yref)[xi] : 0; scalar_t dy = (p.dy) ? (scalar_t)((const T *)p.dy)[xi] : one; scalar_t yy = (gain != 0) ? yref / gain : 0; scalar_t y = 0; // Apply bias. ((G == 0) ? x : xref) += b; // linear if (A == 1) { if (G == 0) y = x; if (G == 1) y = x; } // relu if (A == 2) { if (G == 0) y = (x > 0) ? x : 0; if (G == 1) y = (yy > 0) ? x : 0; } // lrelu if (A == 3) { if (G == 0) y = (x > 0) ? x : x * alpha; if (G == 1) y = (yy > 0) ? x : x * alpha; } // tanh if (A == 4) { if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } if (G == 1) y = x * (one - yy * yy); if (G == 2) y = x * (one - yy * yy) * (-two * yy); } // sigmoid if (A == 5) { if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); if (G == 1) y = x * yy * (one - yy); if (G == 2) y = x * yy * (one - yy) * (one - two * yy); } // elu if (A == 6) { if (G == 0) y = (x >= 0) ? x : exp(x) - one; if (G == 1) y = (yy >= 0) ? x : x * (yy + one); if (G == 2) y = (yy >= 0) ? 
0 : x * (yy + one); } // selu if (A == 7) { if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); } // softplus if (A == 8) { if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); if (G == 1) y = x * (one - exp(-yy)); if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } } // swish if (A == 9) { if (G == 0) y = (x < -expRange) ? 0 : x / (exp(-x) + one); else { scalar_t c = exp(xref); scalar_t d = c + one; if (G == 1) y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); else y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; } } // Apply gain. y *= gain * dy; // Clamp. if (clamp >= 0) { if (G == 0) y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; else y = (yref > -clamp & yref < clamp) ? y : 0; } // Store. ((T *)p.y)[xi] = (T)y; } } //------------------------------------------------------------------------ // CUDA kernel selection. 
template <class T> void *choose_bias_act_kernel(const bias_act_kernel_params &p) { if (p.act == 1) return (void *)bias_act_kernel<T, 1>; if (p.act == 2) return (void *)bias_act_kernel<T, 2>; if (p.act == 3) return (void *)bias_act_kernel<T, 3>; if (p.act == 4) return (void *)bias_act_kernel<T, 4>; if (p.act == 5) return (void *)bias_act_kernel<T, 5>; if (p.act == 6) return (void *)bias_act_kernel<T, 6>; if (p.act == 7) return (void *)bias_act_kernel<T, 7>; if (p.act == 8) return (void *)bias_act_kernel<T, 8>; if (p.act == 9) return (void *)bias_act_kernel<T, 9>; return NULL; } //------------------------------------------------------------------------ static bool has_same_layout(torch::Tensor x, torch::Tensor y) { if (x.dim() != y.dim()) return false; for (int64_t i = 0; i < x.dim(); i++) { if (x.size(i) != y.size(i)) return false; if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) return false; } return true; } //------------------------------------------------------------------------ torch::Tensor bias_act_op(const torch::Tensor &x, const torch::Tensor &b, const torch::Tensor &xref, const torch::Tensor &yref, const torch::Tensor &dy, int grad, int dim, int act, float alpha, float gain, float clamp) { // Validate arguments. 
TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); TORCH_CHECK( b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); TORCH_CHECK( dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); TORCH_CHECK(b.dim() == 1, "b must have rank 1"); TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); TORCH_CHECK(grad >= 0, "grad must be non-negative"); // Validate layout. TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); // Create output tensor. const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); torch::Tensor y = torch::empty_like(x); TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); // Initialize CUDA kernel parameters. bias_act_kernel_params p; p.x = x.data_ptr(); p.b = (b.numel()) ? b.data_ptr() : NULL; p.xref = (xref.numel()) ? xref.data_ptr() : NULL; p.yref = (yref.numel()) ? yref.data_ptr() : NULL; p.dy = (dy.numel()) ? 
dy.data_ptr() : NULL; p.y = y.data_ptr(); p.grad = grad; p.act = act; p.alpha = alpha; p.gain = gain; p.clamp = clamp; p.sizeX = (int)x.numel(); p.sizeB = (int)b.numel(); p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; // Choose CUDA kernel. void *kernel; AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { kernel = choose_bias_act_kernel<scalar_t>(p); }); TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); // Launch CUDA kernel. p.loopX = 4; int blockSize = 4 * 32; int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; void *args[] = {&p}; #ifdef MMCV_WITH_HIP AT_CUDA_CHECK(hipLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); #else AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); #endif return y; }
c7acc836c61d2add223f3a91853ef3bc4093a854.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <thrust/transform_reduce.h> #include "tensors/tensor_operators.h" #include "functional/functional.h" #include "functional/tensor.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" #include "3rd_party/reduce_all.h" namespace marian { namespace gpu { struct isnan_test { __host__ __device__ bool operator()(const float a) const { return isnan(a); } }; __device__ inline float stableLogit(float x) { if(x >= 0) { float z = expf(-x); return 1.0 / (1.0 + z); } else { float z = expf(x); return z / (1.0 + z); } } bool IsNan(Tensor in) { // hipSetDevice(in->getDevice().no); // thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data()); // thrust::device_ptr<float> end // = thrust::device_pointer_cast(in->data() + in->size()); // return thrust::transform_reduce( // begin, end, isnan_test(), 0, thrust::plus<bool>()); return false; } void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) { hipSetDevice(out->getDevice().no); int step = 1; for(int i = 0; i < axis; ++i) step *= out->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto in : inputs) { size_t size = in->shape().elements() / step; size_t offset2 = i * size; hipMemcpyAsync(out->data() + offset1, in->data() + offset2, size * sizeof(float), hipMemcpyDeviceToDevice); offset1 += size; } } hipStreamSynchronize(0); } __global__ void gInsertCols(float* out, const float* in, size_t rows, size_t cols, size_t cols_out, size_t cols_in, size_t offset_out, size_t offset_in) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols_out + offset_out; const float* rowIn = in + j * cols_in + offset_in; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) { 
hipSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); size_t offset = 0; int cols_out = out->shape().back(); for(auto in : inputs) { ABORT_IF(rows != in->shape().elements() / in->shape().back(), "First dimension must be equal"); int cols_in = in->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols_in); hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0); offset += cols_in; } hipStreamSynchronize(0); } void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) { if(ax == out->shape().size() - 1) Concatenate1(out, inputs); else ConcatCont(out, inputs, ax); } void Split1(std::vector<Tensor>& outputs, const Tensor in) { hipSetDevice(in->getDevice().no); size_t offset = 0; int rows = in->shape().elements() / in->shape().back(); int cols_in = in->shape().back(); for(auto out : outputs) { ABORT_IF(rows != out->shape().elements() / out->shape().back(), "First dimension must be equal"); int cols_out = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols_out); hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset); offset += cols_out; } hipStreamSynchronize(0); } void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) { hipSetDevice(in->getDevice().no); int step = 1; for(int i = 0; i < axis; ++i) step *= in->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto out : outputs) { size_t size = out->shape().elements() / step; size_t offset2 = i * size; hipMemcpyAsync(out->data() + offset2, in->data() + offset1, size * sizeof(float), hipMemcpyDeviceToDevice); offset1 += size; } } hipStreamSynchronize(0); } void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) { if(ax == in->shape().size() - 1) Split1(outputs, in); else 
SplitCont(outputs, in, ax); } __global__ void gTransposeND( functional::Tensor<float> out, const functional::Tensor<float> in, const functional::Array<int, functional::Shape::size()> permute) { constexpr size_t N = functional::Shape::size(); functional::Array<int, N> oDims; functional::Array<int, N> pDims; int length = out.shape().elements(); for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out.shape().dims(index, oDims); for(int i = 0; i < N; ++i) pDims[permute[i]] = oDims[i]; out[index] = in[pDims]; } } } void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) { hipSetDevice(out->getDevice().no); functional::Array<int, functional::Shape::size()> axes; int diff = functional::Shape::size() - vAxis.size(); for(int i = 0; i < axes.size(); ++i) if(i < diff) axes[i] = i; else axes[i] = vAxis[i - diff] + diff; int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gTransposeND), dim3(blocks), dim3(threads), 0, 0, out, in, axes); } __global__ void gSoftmax(float* out, functional::Shape outShape, const float* in, const float* mask, const functional::Shape maskShape) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); bool broadcast = outShape != maskShape; functional::Array<int, functional::Shape::size()> dims; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = -CUDA_FLT_MAX; // mask for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } 
if(mVal && sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } float ex = 0; if(mVal) ex = __expf(sp[id] - max); so[id] = ex; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { so[id] = so[id] / _sum[0]; } } } } } void Softmax(Tensor out, Tensor in, Tensor mask) { hipSetDevice(out->getDevice().no); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; if(mask) hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), mask->data(), mask->shape()); else hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), 0, out->shape()); } __global__ void gLogSoftmax(float* out, const functional::Shape outShape, const float* in) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + 
j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sm = sp[id] - max; float ex = __expf(sm); so[id] = sm; _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) so[id] -= __logf(_sum[0]); } } } } void LogSoftmax(Tensor out, Tensor in) { hipSetDevice(out->getDevice().no); size_t m = out->shape().elements() / out->shape().back(); size_t k = out->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gLogSoftmax), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data()); } /////////////////////////////////////////////////////// __global__ void gSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + 
j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += valRow[id] * adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float val = valRow[id] * (adjRow[id] - _sum[0]); if(val) gradRow[id] += val; } } } } } void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { hipSetDevice(adj->getDevice().no); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. // adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = ::min(MAX_BLOCKS, m); int threads = ::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0, grad->data(), adj->data(), val->data(), m, k); } __global__ void gLogSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { extern __shared__ float _share[]; float* _sum = _share + blockDim.x; float* gradRow = grad + j * cols; const float* adjRow = adj + j * cols; const float* valRow = val + j * cols; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + 
skip]; len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]); } } } } void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) { hipSetDevice(adj->getDevice().no); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. // adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = ::min(MAX_BLOCKS, m); int threads = ::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gLogSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0, grad->data(), adj->data(), val->data(), m, k); } /////////////////////////////////////////////////////// __global__ void gArgmax(float* out, const float* data, size_t rows, size_t cols) { size_t row = blockIdx.x; size_t startInd = row * cols; float maxScore = -99999; size_t maxInd; for(size_t col = 0; col < cols; ++col) { size_t ind = startInd + col; float score = data[ind]; if(score > maxScore) { maxScore = score; maxInd = col; } } out[row] = maxInd; } /////////////////////////////////////////////////////// __global__ void gCopyRows(float* out, const float* in, size_t cols, const size_t* sourceRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = j; size_t srcId = sourceRowIdx[j]; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice().no); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = 
::min(MAX_THREADS, (int)cols); int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = targetRowIdx[j]; size_t srcId = j; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) atomicAdd(rowOut + i, rowIn[i]); } } } } void PasteRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice().no); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)cols); int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy); // @TODO: turn into tensor size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gPasteRows), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(hipFree(d_indices)); } ///////////// __global__ void gCopyCols(float* out, const float* in, size_t rows, size_t colsIn, const size_t* sourceColIdx, size_t colsOut) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsOut; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsOut) rowOut[i] = rowIn[sourceColIdx[i]]; } } } } void CopyCols(Tensor 
out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice().no); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)colsToCopy); int blocks = ::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gCopyCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gPasteCols(float* out, const float* in, size_t rows, size_t colsOut, const size_t* targetColIdx, size_t colsIn) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsIn; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsIn) rowOut[targetColIdx[i]] = rowIn[i]; } } } } void PasteCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { hipSetDevice(out->getDevice().no); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = ::min(MAX_THREADS, (int)colsToCopy); int blocks = ::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gPasteCols), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(hipFree(d_indices)); } __global__ void gSelect(float* out, functional::Shape outShape, const float* in, const functional::Shape inShape, int axis, size_t* d_indices) { int length = outShape.elements(); functional::Array<int, 
functional::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { outShape.dims(index, dims); dims[axis] = d_indices[dims[axis]]; int inIndex = inShape.index(dims); out[index] = in[inIndex]; } } } __global__ void gInsert(float* out, functional::Shape outShape, const float* in, const functional::Shape inShape, int axis, size_t* d_indices) { int length = inShape.elements(); functional::Array<int, functional::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { inShape.dims(index, dims); dims[axis] = d_indices[dims[index]]; int outIndex = outShape.index(dims); out[outIndex] = in[index]; } } } void Select(Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices, Ptr<Allocator> allocator) { hipSetDevice(out->getDevice().no); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); CudaCopy(indices.data(), indices.data() + indices.size(), mp_indices->data<size_t>()); int axisGPU = axis + functional::Shape::size() - out->shape().size(); hipLaunchKernelGGL(( gSelect), dim3(blocks), dim3(threads), 0, 0, out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } void Insert(Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices, Ptr<Allocator> allocator) { hipSetDevice(in->getDevice().no); int length = in->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); CudaCopy(indices.data(), indices.data() + indices.size(), mp_indices->data<size_t>()); int axisGPU = axis + 
functional::Shape::size() - out->shape().size(); hipLaunchKernelGGL(( gInsert), dim3(blocks), dim3(threads), 0, 0, out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } __global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowState = state + j * cols; const float* xWrow = xW + j * cols * 3; const float* sUrow = sU + j * cols * 3; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float r = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float z = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float h; if(final) h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r); else h = tanhf(xWrow[l] + sUrow[l] * r + b[l]); float out = (1.0f - z) * h + z * rowState[i]; rowOut[i] = m * out + (1 - m) * rowState[i]; } } } } } void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) { hipSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gGRUFastForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask rows, cols, final); } __global__ void gGRUFastBackward(float* outState, float* outXW, float* outSU, float* outB, const float* state, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutState = outState + j * cols; float* rowOutXW = outXW + j * cols * 3; float* rowOutSU = outSU + j * cols * 3; const float* rowState = state + j * cols; const float* rowXW = xW + j * cols * 3; const float* rowSU = sU + j * cols * 3; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + cols; int l = i + 2 * cols; float r = stableLogit(rowXW[i] + rowSU[i] + b[i]); float z = stableLogit(rowXW[k] + rowSU[k] + b[k]); float h; if(final) h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r); else h = tanhf(rowXW[l] + rowSU[l] * r + b[l]); float adj = rowAdj[i]; float t = (1 - z) * (1 - h * h); // df/ds if(outState) rowOutState[i] += (m * z - m + 1) * adj; // df/d(xW_r) ... float dfdxW_r = m * r * (1 - r) * t * adj; if(final) dfdxW_r *= rowSU[l] + b[l]; else dfdxW_r *= rowSU[l]; if(outXW) rowOutXW[i] += dfdxW_r; if(outSU) rowOutSU[i] += dfdxW_r; if(outB) atomicAdd(outB + i, dfdxW_r); // df/d(xW_z) ... float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj; if(outXW) rowOutXW[k] += dfdxW_z; if(outSU) rowOutSU[k] += dfdxW_z; if(outB) atomicAdd(outB + k, dfdxW_z); // df/d(xW_x) ... 
float dfdxW_x = m * t * adj; if(outXW) rowOutXW[l] += dfdxW_x; if(outSU) rowOutSU[l] += dfdxW_x * r; if(outB) if(final) atomicAdd(outB + l, dfdxW_x * r); else atomicAdd(outB + l, dfdxW_x); } } } } } void GRUFastBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj, bool final) { hipSetDevice(adj->getDevice().no); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gGRUFastBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? inputs[4]->data() : 0, // mask adj->data(), rows, cols, final); } __global__ void gCrossEntropyPick(float* out, const functional::Shape outShape, const float* in, const functional::Shape inShape, const float* pick) { int rows = inShape.elements() / inShape.back(); int cols = inShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) 
{ int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += __expf(sp[id] - max); } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id == (int)pick[j]) { out[j] = __logf(_sum[0]) - sp[id] + max; } } } } } void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) { hipSetDevice(out->getDevice().no); int rows = in->shape().elements() / in->shape().back(); int cols = in->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gCrossEntropyPick), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), in->data(), in->shape(), pick->data()); } __global__ void gCrossEntropyPickBackward(float* out, const functional::Shape outShape, const float* adj, const float* in, const float* pick) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* sp = in + j * cols; float* so = out + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = sp[threadIdx.x]; for(int tid = 1; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { if(sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x + skip] > _max[threadIdx.x]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); float max = _max[0]; __syncthreads(); float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { 
int id = tid + threadIdx.x; if(id < cols) { float ex = __expf(sp[id] - max); _sum[threadIdx.x] += ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); // cross-entropy for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float sub = (float)(id == (int)pick[j]); so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub); } } } } } void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) { hipSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gCrossEntropyPickBackward), dim3(blocks), dim3(threads), shared, 0, out->data(), out->shape(), adj->data(), a->data(), pick->data()); } float L2Norm(Tensor in) { hipSetDevice(in->getDevice().no); int size = in->shape().elements(); int threads = ::min(MAX_THREADS, size); int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0)); uint8_t* data; hipMalloc(&data, blocks * sizeof(float)); Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)), {1, blocks}, in->getBackend())); using namespace functional; ReduceAll(_1 * _1, out, in); float dataCpu = sqrtf(out->get(0)); out.reset(); hipFree(data); return dataCpu; } __global__ void gAtt(float* out, const float* va, const float* ctx, const float* state, int m, // total rows (batch x time x beam) int k, // depth int b, // batch size int t // time of ctx ) { int rows = m; int cols = k; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* vaRow = va; const float* ctxRow = ctx + (j % (b * t)) * cols; const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols; 
extern __shared__ float _share[]; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = ctxRow[id] + stateRow[id]; float ex = tanhf(z) * vaRow[id]; _sum[threadIdx.x] += ex; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sum[threadIdx.x] += _sum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); out[j] = _sum[0]; __syncthreads(); } } } void Att(Tensor out, Tensor va, Tensor context, Tensor state) { hipSetDevice(out->getDevice().no); size_t m = out->shape().elements() / out->shape().back(); size_t k = context->shape()[-1]; size_t b = context->shape()[-2]; size_t t = context->shape()[-3]; int blocks = ::min(MAX_BLOCKS, (int)m); int threads = ::min(MAX_THREADS, (int)k); int shared = sizeof(float) * threads * 2; hipLaunchKernelGGL(( gAtt), dim3(blocks), dim3(threads), shared, 0, out->data(), va->data(), context->data(), state->data(), m, k, b, t); } __global__ void gAttBack(float* gVa, float* gContext, float* gState, const float* va, const float* context, const float* state, const float* adj, int m, // rows int k, // cols int n // batch size ) { int rows = m; int cols = k; for(int bid = 0; bid < m; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* gcRow = gContext + j * cols; float* gsRow = gState + (j % n) * cols; const float* cRow = context + j * cols; const float* sRow = state + (j % n) * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float z = cRow[id] + sRow[id]; float t = tanhf(z); float r = va[id] * (1.f - t * t); gcRow[id] += r * adj[j]; gsRow[id] += r * adj[j]; atomicAdd(gVa + id, t * adj[j]); } } } } } void AttBack(Tensor gVa, Tensor gContext, Tensor gState, Tensor va, Tensor context, Tensor state, Tensor adj) { hipSetDevice(adj->getDevice().no); size_t m = adj->shape().elements() / 
adj->shape()[-1]; size_t k = context->shape()[-1]; size_t n = context->shape()[-2]; int blocks = ::min(MAX_BLOCKS, (int)n); int threads = ::min(MAX_THREADS, (int)k); hipLaunchKernelGGL(( gAttBack), dim3(blocks), dim3(threads), 0, 0, gVa->data(), gContext->data(), gState->data(), va->data(), context->data(), state->data(), adj->data(), m, k, n); } __global__ void gLNormalization(float* out, const float* in, const float* alpha, const float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float _share[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; float* _sum = _share + blockDim.x; _sum[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { _sum[threadIdx.x] += sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = _sum[0] / cols; __syncthreads(); float* _sqSum = _share + blockDim.x; _sqSum[threadIdx.x] = 0.0; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = sp[id] - mean; _sqSum[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (_sqSum[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float t = alpha[id] * ((sp[id] - mean) / sigma); if(beta != nullptr) t += beta[id]; so[id] = t; } } } } } void LayerNormalization(Tensor out, Tensor in, Tensor gamma, Tensor beta, float eps) { hipSetDevice(out->getDevice().no); int rows = in->shape().elements() / 
in->shape().back(); int cols = in->shape().back(); int blocks = ::min(MAX_BLOCKS, (int)rows); int threads = ::min(MAX_THREADS, (int)cols); int shared = 2 * threads * sizeof(float); hipLaunchKernelGGL(( gLNormalization), dim3(blocks), dim3(threads), shared, 0, out->data(), in->data(), gamma->data(), beta ? beta->data() : nullptr, rows, cols, eps); } __global__ void gLayerNormalizationGrad(float* gradX, float* gradGamma, float* gradBeta, float* adj, float* y, float* x, float* gamma, float* beta, int rows, int cols, float eps = 1e-9) { extern __shared__ float shared[]; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* sum_adj = shared; float* sum_adj_x = shared + blockDim.x; float* sum_x = shared + 2 * blockDim.x; float* sum_sqr = shared + 3 * blockDim.x; const float* xRow = x + j * cols; const float* yRow = y + j * cols; const float* adjRow = adj + j * cols; float* gradXRow = gradX + j * cols; sum_x[threadIdx.x] = 0.0f; sum_adj[threadIdx.x] = 0.0f; sum_adj_x[threadIdx.x] = 0.0f; sum_sqr[threadIdx.x] = 0.0f; for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { sum_x[threadIdx.x] += xRow[id]; sum_adj_x[threadIdx.x] += adjRow[id] * (yRow[id] - ((beta) ? 
beta[id] : 0)) / gamma[id]; sum_adj[threadIdx.x] += adjRow[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { sum_x[threadIdx.x] += sum_x[threadIdx.x + skip]; sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip]; sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); float mean = sum_x[0] / cols; __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float ex = xRow[id] - mean; sum_sqr[threadIdx.x] += ex * ex; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip]; len = (len + 1) >> 1; } __syncthreads(); float sigma = sqrtf(eps + (sum_sqr[0] / cols)); __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float grad_x = 0.0f; float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id]; grad_x += cols * adjRow[id]; grad_x -= sum_adj[0]; grad_x -= sum_adj_x[0] * x_hat; grad_x /= (cols * sigma); float valX = gamma[id] * grad_x; float sign = (0.f < valX) - (valX < 0.f); valX = fabs(valX) > 1000 ? sign * 1000 : valX; gradXRow[id] += valX; atomicAdd(gradGamma + id, adjRow[id] * x_hat); if(beta) { atomicAdd(gradBeta + id, adjRow[id]); } } } } } } void LayerNormalizationGrad(Tensor gradX, Tensor gradGamma, Tensor gradBeta, Tensor adj, Tensor y, Tensor x, Tensor gamma, Tensor beta, float eps) { hipSetDevice(adj->getDevice().no); int rows = y->shape().elements() / y->shape()[-1]; int cols = y->shape()[-1]; int threads = ::min(MAX_THREADS, cols); int blocks = ::min(MAX_BLOCKS, rows); int shared = sizeof(float) * threads * 4; hipLaunchKernelGGL(( gLayerNormalizationGrad), dim3(blocks), dim3(threads), shared, 0, gradX->data(), gradGamma->data(), (gradBeta) ? 
gradBeta->data() : nullptr, adj->data(), y->data(), x->data(), gamma->data(), (beta) ? beta->data() : nullptr, rows, cols, eps); } __global__ void gShift(float* out, const float* in, int length, int offset) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { if(index - offset < 0 || index - offset >= length) out[index] = 0; else out[index] = in[index - offset]; } } } void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) { ABORT_IF(in->shape().size() != shift.size(), "bad dimensions"); int offset = 0; for(int i = 0; i < shift.size(); ++i) offset += in->shape().stride(i) * shift[i]; if(invert) offset = -offset; hipSetDevice(out->getDevice().no); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gShift), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset); } __global__ void gSetSparse(float* out, const size_t* indices, const float* values, int length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out[indices[index]] = values[index]; } } } void SetSparse(float* out, const std::vector<size_t>& indices, const std::vector<float>& values) { int length = indices.size(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); size_t* d_indices; CUDA_CHECK(hipMalloc(&d_indices, length * sizeof(size_t))); CUDA_CHECK(hipMemcpy(d_indices, indices.data(), length * sizeof(size_t), hipMemcpyHostToDevice)); float* d_values; CUDA_CHECK(hipMalloc(&d_values, length * sizeof(float))); CUDA_CHECK(hipMemcpy( d_values, values.data(), length * sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gSetSparse), dim3(blocks), dim3(threads), 0, 0, out, d_indices, d_values, 
length); hipFree(d_indices); hipFree(d_values); } /******************************************************************************/ __global__ void gLSTMCellForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc = tanhf(xWrow[l] + sUrow[l] + b[l]); float cout = gf * rowCell[i] + gi * gc; rowOut[i] = m * cout + (1 - m) * rowCell[i]; } } } } } void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) { hipSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMCellForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask rows, cols); } __global__ void gLSTMOutputForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); rowOut[i] = go * tanhf(rowCell[i]); } } } } } void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) { hipSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); int cols = out->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMOutputForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output inputs[0]->data(), // cell state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b rows, cols); } __global__ void gLSTMCellBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float m = !mask || mask[j]; float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]); int k = i + cols; float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]); int l = i + 2 * cols; float gc 
= tanhf(xWrow[l] + sUrow[l] + b[l]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += (m * gf - m + 1) * adj; // dc/d(b_f) = dc/d(xW_f) ... float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj; if(outXW) rowOutXW[i] += dcdxf; if(outSU) rowOutSU[i] += dcdxf; if(outB) atomicAdd(outB + i, dcdxf); // dc/d(b_i) ... float dcdb_i = m * gc * gi * (1 - gi) * adj; if(outXW) rowOutXW[k] += dcdb_i; if(outSU) rowOutSU[k] += dcdb_i; if(outB) atomicAdd(outB + k, dcdb_i); // dc/d(b_c) ... float dcdxc = m * gi * (1 - gc * gc) * adj; if(outXW) rowOutXW[l] += dcdxc; if(outSU) rowOutSU[l] += dcdxc; if(outB) atomicAdd(outB + l, dcdxc); } } } } } void LSTMCellBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { hipSetDevice(adj->getDevice().no); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMCellBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b inputs.size() > 4 ? 
inputs[4]->data() : 0, // mask adj->data(), rows, cols); } __global__ void gLSTMOutputBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* adj, size_t rows, size_t cols) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOutCell = outCell + j * cols; float* rowOutXW = outXW + j * cols * 4; float* rowOutSU = outSU + j * cols * 4; const float* rowCell = cell + j * cols; const float* xWrow = xW + j * cols * 4; const float* sUrow = sU + j * cols * 4; const float* rowAdj = adj + j * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) { int k = i + 3 * cols; float go = stableLogit(xWrow[k] + sUrow[k] + b[k]); float t = tanhf(rowCell[i]); float adj = rowAdj[i]; // dc/dc_{t-1} if(outCell) rowOutCell[i] += go * (1 - t * t) * adj; // dc/d(b_o) = dc/d(xW_f) ... float dcdxo = t * go * (1 - go) * adj; if(outXW) rowOutXW[k] += dcdxo; if(outSU) rowOutSU[k] += dcdxo; if(outB) atomicAdd(outB + k, dcdxo); } } } } } void LSTMOutputBackward(std::vector<Tensor> outputs, std::vector<Tensor> inputs, Tensor adj) { hipSetDevice(adj->getDevice().no); int rows = adj->shape().elements() / adj->shape().back(); int cols = adj->shape().back(); int blocks = ::min(MAX_BLOCKS, rows); int threads = ::min(MAX_THREADS, cols); hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(blocks), dim3(threads), 0, 0, outputs[0] ? outputs[0]->data() : 0, // state - adj outputs[1] ? outputs[1]->data() : 0, // xW - adj outputs[2] ? outputs[2]->data() : 0, // sU - adj outputs[3] ? 
outputs[3]->data() : 0, // b - adj inputs[0]->data(), // state inputs[1]->data(), // xW inputs[2]->data(), // sU inputs[3]->data(), // b adj->data(), rows, cols); } __global__ void gHighwayForward(float* out, const float* in1, const float* in2, const float* t, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out[index] = in1[index] * sigma + in2[index] * (1.f - sigma); } } } void HighwayForward(Tensor out, const Tensor in1, const Tensor in2, const Tensor t) { hipSetDevice(out->getDevice().no); int length = out->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gHighwayForward), dim3(blocks), dim3(threads), 0, 0, out->data(), in1->data(), in2->data(), t->data(), length); } __global__ void gHighwayBackward(float* out1, float* out2, float* outt, const float* in1, const float* in2, const float* t, const float* adj, size_t length) { for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { float sigma = stableLogit(t[index]); out1[index] = sigma * adj[index]; out2[index] = (1.f - sigma) * adj[index]; outt[index] = sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index]; } } } void HighwayBackward(Tensor out1, Tensor out2, Tensor outt, const Tensor in1, const Tensor in2, const Tensor t, const Tensor adj) { hipSetDevice(out1->getDevice().no); int length = out1->shape().elements(); int threads = ::min(MAX_THREADS, length); int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0)); hipLaunchKernelGGL(( gHighwayBackward), dim3(blocks), dim3(threads), 0, 0, out1->data(), out2->data(), outt->data(), in1->data(), in2->data(), t->data(), adj->data(), length); } __global__ void gMaxPoolingForward(float* out, int outRows, 
int outCols, float* in, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= outRows * outCols) return; int rowId = tid / outRows; int colId = tid % outRows; float* b = in + (rowId * inCols) + (colId * width); float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; if(colId == outRows - 1) { width = lastWidth; } float currentMax = b[0] * localMask[0]; for(int i = 1; i < width; ++i) { if(b[i] * localMask[i] > currentMax) { currentMax = b[i] * localMask[i]; } } out[rowId + (colId * outCols)] = currentMax; } void PoolingWithMaskingForward(Tensor out, Tensor in, Tensor mask, int width, bool isEven) { int n = out->shape().elements(); int threads = ::min(n, MAX_THREADS); int blocks = n / threads + (n % threads != 0); auto& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; auto& outShape = out->shape(); int outRows = outShape[2]; int outCols = outShape[0] * outShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? 
width : (inCols - isEven) % width; hipLaunchKernelGGL(( gMaxPoolingForward), dim3(blocks), dim3(threads), 0, 0, out->data(), outRows, outCols, in->data(), inRows, inCols, mask->data(), outShape[1], mask->shape()[2], width, lastWidth); } __global__ void gMaxPoolingBackward(float* adj, int adjRows, int adjCols, float* in, float* adjIn, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= adjRows * adjCols) return; int rowId = tid / adjRows; int colId = tid % adjRows; float* b = in + (rowId * inCols) + (colId * width); if(colId == adjRows - 1) { width = lastWidth; } float* localMask = mask + (rowId / numKernels) * maskCols + colId * width; size_t currentMaxIdx = 0; for(int i = 1; i < width; ++i) { if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) { currentMaxIdx = i; } } adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx] += adj[rowId + (colId * adjCols)]; } void PoolingWithMaskingBackward(Tensor adj, Tensor adjIn, Tensor in, Tensor mask, int width, bool isEven) { int n = adj->shape().elements(); int threads = ::min(n, 512); int blocks = n / threads + (n % threads != 0); auto& inShape = in->shape(); int inRows = inShape[0] * inShape[1]; int inCols = inShape[2]; auto& adjShape = adj->shape(); int adjRows = adjShape[2]; int adjCols = adjShape[0] * adjShape[1]; int lastWidth = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width; hipLaunchKernelGGL(( gMaxPoolingBackward), dim3(blocks), dim3(threads), 0, 0, adj->data(), adjRows, adjCols, in->data(), adjIn->data(), inRows, inCols, mask->data(), adjShape[1], mask->shape()[2], width, lastWidth); } } } // namespace marian
c7acc836c61d2add223f3a91853ef3bc4093a854.cu
//#include <thrust/transform_reduce.h> #include "tensors/tensor_operators.h" #include "functional/functional.h" #include "functional/tensor.h" #include "tensors/gpu/backend.h" #include "tensors/gpu/cuda_helpers.h" #include "3rd_party/reduce_all.h" namespace marian { namespace gpu { struct isnan_test { __host__ __device__ bool operator()(const float a) const { return isnan(a); } }; __device__ inline float stableLogit(float x) { if(x >= 0) { float z = expf(-x); return 1.0 / (1.0 + z); } else { float z = expf(x); return z / (1.0 + z); } } bool IsNan(Tensor in) { // cudaSetDevice(in->getDevice().no); // thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data()); // thrust::device_ptr<float> end // = thrust::device_pointer_cast(in->data() + in->size()); // return thrust::transform_reduce( // begin, end, isnan_test(), 0, thrust::plus<bool>()); return false; } void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) { cudaSetDevice(out->getDevice().no); int step = 1; for(int i = 0; i < axis; ++i) step *= out->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto in : inputs) { size_t size = in->shape().elements() / step; size_t offset2 = i * size; cudaMemcpyAsync(out->data() + offset1, in->data() + offset2, size * sizeof(float), cudaMemcpyDeviceToDevice); offset1 += size; } } cudaStreamSynchronize(0); } __global__ void gInsertCols(float* out, const float* in, size_t rows, size_t cols, size_t cols_out, size_t cols_in, size_t offset_out, size_t offset_in) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* rowOut = out + j * cols_out + offset_out; const float* rowIn = in + j * cols_in + offset_in; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) { cudaSetDevice(out->getDevice().no); int rows = out->shape().elements() / out->shape().back(); 
size_t offset = 0; int cols_out = out->shape().back(); for(auto in : inputs) { ABORT_IF(rows != in->shape().elements() / in->shape().back(), "First dimension must be equal"); int cols_in = in->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols_in); gInsertCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0); offset += cols_in; } cudaStreamSynchronize(0); } void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) { if(ax == out->shape().size() - 1) Concatenate1(out, inputs); else ConcatCont(out, inputs, ax); } void Split1(std::vector<Tensor>& outputs, const Tensor in) { cudaSetDevice(in->getDevice().no); size_t offset = 0; int rows = in->shape().elements() / in->shape().back(); int cols_in = in->shape().back(); for(auto out : outputs) { ABORT_IF(rows != out->shape().elements() / out->shape().back(), "First dimension must be equal"); int cols_out = out->shape().back(); int blocks = std::min(MAX_BLOCKS, rows); int threads = std::min(MAX_THREADS, cols_out); gInsertCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset); offset += cols_out; } cudaStreamSynchronize(0); } void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) { cudaSetDevice(in->getDevice().no); int step = 1; for(int i = 0; i < axis; ++i) step *= in->shape()[i]; size_t offset1 = 0; for(int i = 0; i < step; ++i) { for(auto out : outputs) { size_t size = out->shape().elements() / step; size_t offset2 = i * size; cudaMemcpyAsync(out->data() + offset2, in->data() + offset1, size * sizeof(float), cudaMemcpyDeviceToDevice); offset1 += size; } } cudaStreamSynchronize(0); } void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) { if(ax == in->shape().size() - 1) Split1(outputs, in); else SplitCont(outputs, in, ax); } __global__ void gTransposeND( functional::Tensor<float> out, const functional::Tensor<float> in, const 
functional::Array<int, functional::Shape::size()> permute) { constexpr size_t N = functional::Shape::size(); functional::Array<int, N> oDims; functional::Array<int, N> pDims; int length = out.shape().elements(); for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { out.shape().dims(index, oDims); for(int i = 0; i < N; ++i) pDims[permute[i]] = oDims[i]; out[index] = in[pDims]; } } } void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) { cudaSetDevice(out->getDevice().no); functional::Array<int, functional::Shape::size()> axes; int diff = functional::Shape::size() - vAxis.size(); for(int i = 0; i < axes.size(); ++i) if(i < diff) axes[i] = i; else axes[i] = vAxis[i - diff] + diff; int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); gTransposeND<<<blocks, threads>>>(out, in, axes); } __global__ void gSoftmax(float* out, functional::Shape outShape, const float* in, const float* mask, const functional::Shape maskShape) { int rows = outShape.elements() / outShape.back(); int cols = outShape.back(); bool broadcast = outShape != maskShape; functional::Array<int, functional::Shape::size()> dims; for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { float* so = out + j * cols; const float* sp = in + j * cols; extern __shared__ float _share[]; float* _max = _share + blockDim.x; _max[threadIdx.x] = -CUDA_FLT_MAX; // mask for(int tid = 0; tid < cols; tid += blockDim.x) { int id = tid + threadIdx.x; if(id < cols) { float mVal = 1.f; if(mask) { int mIndex = id + j * cols; if(broadcast) { outShape.dims(mIndex, dims); mIndex = maskShape.bindex(dims); } mVal = mask[mIndex]; } if(mVal && sp[id] > _max[threadIdx.x]) _max[threadIdx.x] = sp[id]; } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 
// ---- Row-wise (log-)softmax forward/backward: one block per row, tree
// reductions in dynamic shared memory (2 * blockDim.x floats).
// NOTE(review): this span opens inside gSoftmax — the leading "1) >> 1;" is the
// tail of its "int skip = (len + 1) >> 1;" statement; the kernel head is above
// this chunk.
1) >> 1;
      if(threadIdx.x < (len >> 1)) {
        if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
          _max[threadIdx.x] = _max[threadIdx.x + skip];
        }
      }
      len = (len + 1) >> 1;
    }
    __syncthreads();
    float max = _max[0];
    __syncthreads();

    // Second pass: masked exp(x - max) written to the output row while the
    // per-thread partial sums accumulate for the normalizer.
    float* _sum = _share + blockDim.x;
    _sum[threadIdx.x] = 0.0;
    for(int tid = 0; tid < cols; tid += blockDim.x) {
      int id = tid + threadIdx.x;
      if(id < cols) {
        float mVal = 1.f;
        if(mask) {
          int mIndex = id + j * cols;
          if(broadcast) {
            // Mask has a different (broadcastable) shape: re-map the flat index.
            outShape.dims(mIndex, dims);
            mIndex = maskShape.bindex(dims);
          }
          mVal = mask[mIndex];
        }
        float ex = 0;
        if(mVal)
          ex = __expf(sp[id] - max);
        so[id] = ex;
        _sum[threadIdx.x] += ex;
      }
    }
    __syncthreads();
    // Tree-reduce the partial sums into _sum[0].
    len = blockDim.x;
    while(len != 1) {
      __syncthreads();
      int skip = (len + 1) >> 1;
      if(threadIdx.x < (len >> 1))
        _sum[threadIdx.x] += _sum[threadIdx.x + skip];
      len = (len + 1) >> 1;
    }
    __syncthreads();
    // Normalize: softmax = exp(x - max) / sum.
    for(int tid = 0; tid < cols; tid += blockDim.x) {
      int id = tid + threadIdx.x;
      if(id < cols) {
        so[id] = so[id] / _sum[0];
      }
    }
  }
}
}

// Host launcher for gSoftmax: one block per row (capped at MAX_BLOCKS), one
// thread per column (capped at MAX_THREADS); shared mem = 2 float arrays.
// Without a mask the kernel gets a null mask pointer and out's own shape.
void Softmax(Tensor out, Tensor in, Tensor mask) {
  cudaSetDevice(out->getDevice().no);
  size_t m = out->shape().elements() / out->shape().back();
  size_t k = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, (int)m);
  int threads = std::min(MAX_THREADS, (int)k);
  int shared = sizeof(float) * threads * 2;
  if(mask)
    gSoftmax<<<blocks, threads, shared>>>(
        out->data(), out->shape(), in->data(), mask->data(), mask->shape());
  else
    gSoftmax<<<blocks, threads, shared>>>(
        out->data(), out->shape(), in->data(), 0, out->shape());
}

// gLogSoftmax: row-wise log-softmax, out = (x - max) - log(sum(exp(x - max))).
// Same two-reduction structure as gSoftmax (max, then sum of exponentials).
__global__ void gLogSoftmax(float* out,
                            const functional::Shape outShape,
                            const float* in) {
  int rows = outShape.elements() / outShape.back();
  int cols = outShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      extern __shared__ float _share[];
      // Row maximum for numerical stability.
      float* _max = _share + blockDim.x;
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      // Store shifted values (x - max) in out; accumulate exp for the log-normalizer.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float sm = sp[id] - max;
          float ex = __expf(sm);
          so[id] = sm;
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // Final: subtract log of the summed exponentials.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols)
          so[id] -= __logf(_sum[0]);
      }
    }
  }
}

// Host launcher for gLogSoftmax; same launch-shape policy as Softmax.
void LogSoftmax(Tensor out, Tensor in) {
  cudaSetDevice(out->getDevice().no);
  size_t m = out->shape().elements() / out->shape().back();
  size_t k = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, (int)m);
  int threads = std::min(MAX_THREADS, (int)k);
  int shared = sizeof(float) * threads * 2;
  gLogSoftmax<<<blocks, threads, shared>>>(
      out->data(), out->shape(), in->data());
}

///////////////////////////////////////////////////////

// gSoftmaxGrad: grad += val * (adj - sum(val * adj)) per row; the row-wise dot
// product sum(val * adj) is tree-reduced in shared memory.
__global__ void gSoftmaxGrad(float* grad,
                             const float* adj,
                             const float* val,
                             const int rows,
                             const int cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      float* gradRow = grad + j * cols;
      const float* adjRow = adj + j * cols;
      const float* valRow = val + j * cols;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += valRow[id] * adjRow[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float val = valRow[id] * (adjRow[id] - _sum[0]);
          // Skip the += when the increment is exactly zero.
          if(val)
            gradRow[id] += val;
        }
      }
    }
  }
}

// Host launcher for gSoftmaxGrad.
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
  cudaSetDevice(adj->getDevice().no);
  // grad and val are both m-by-k matrices, passed as input.
  // A weighted average of each row of grad (according to the weights
  // specified in val) is computed and subtracted from Out.
  // adj is multiplied for each element to get backward step in autodiff
  int m = grad->shape().elements() / grad->shape().back();
  int k = grad->shape().back();
  int blocks = std::min(MAX_BLOCKS, m);
  int threads = std::min(MAX_THREADS, k);
  int shared = sizeof(float) * threads * 2;
  gSoftmaxGrad<<<blocks, threads, shared>>>(
      grad->data(), adj->data(), val->data(), m, k);
}

// gLogSoftmaxGrad: grad += adj - exp(val) * sum(adj); the row sum of adj is
// tree-reduced in shared memory (val holds log-softmax outputs).
__global__ void gLogSoftmaxGrad(float* grad,
                                const float* adj,
                                const float* val,
                                const int rows,
                                const int cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      float* gradRow = grad + j * cols;
      const float* adjRow = adj + j * cols;
      const float* valRow = val + j * cols;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += adjRow[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols)
          gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
      }
    }
  }
}

// Host launcher for gLogSoftmaxGrad (body continues past this span).
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
cudaSetDevice(adj->getDevice().no); // grad and val are both m-by-k matrices, passed as input. // A weighted average of each row of grad (according to the weights // specified in val) is computed and subtracted from Out. // adj is multiplied for each element to get backward step in autodiff int m = grad->shape().elements() / grad->shape().back(); int k = grad->shape().back(); int blocks = std::min(MAX_BLOCKS, m); int threads = std::min(MAX_THREADS, k); int shared = sizeof(float) * threads * 2; gLogSoftmaxGrad<<<blocks, threads, shared>>>( grad->data(), adj->data(), val->data(), m, k); } /////////////////////////////////////////////////////// __global__ void gArgmax(float* out, const float* data, size_t rows, size_t cols) { size_t row = blockIdx.x; size_t startInd = row * cols; float maxScore = -99999; size_t maxInd; for(size_t col = 0; col < cols; ++col) { size_t ind = startInd + col; float score = data[ind]; if(score > maxScore) { maxScore = score; maxInd = col; } } out[row] = maxInd; } /////////////////////////////////////////////////////// __global__ void gCopyRows(float* out, const float* in, size_t cols, const size_t* sourceRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = j; size_t srcId = sourceRowIdx[j]; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) rowOut[i] = rowIn[i]; } } } } void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice().no); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)cols); int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); 
gCopyRows<<<blocks, threads>>>( out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { size_t dstId = targetRowIdx[j]; size_t srcId = j; float* rowOut = out + dstId * cols; const float* rowIn = in + srcId * cols; for(int tid = 0; tid < cols; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < cols) atomicAdd(rowOut + i, rowIn[i]); } } } } void PasteRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice().no); size_t cols = in->shape().back(); size_t rowsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)cols); int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy); // @TODO: turn into tensor size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), rowsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gPasteRows<<<blocks, threads>>>( out->data(), in->data(), cols, d_indices, rowsToCopy); CUDA_CHECK(cudaFree(d_indices)); } ///////////// __global__ void gCopyCols(float* out, const float* in, size_t rows, size_t colsIn, const size_t* sourceColIdx, size_t colsOut) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsOut; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsOut) rowOut[i] = rowIn[sourceColIdx[i]]; } } } } void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice().no); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)colsToCopy); int blocks = std::min(MAX_BLOCKS, (int)rows); 
size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gCopyCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gPasteCols(float* out, const float* in, size_t rows, size_t colsOut, const size_t* targetColIdx, size_t colsIn) { for(int bid = 0; bid < rows; bid += gridDim.x) { int j = bid + blockIdx.x; if(j < rows) { const float* rowIn = in + j * colsIn; float* rowOut = out + j * colsOut; for(int tid = 0; tid < colsIn; tid += blockDim.x) { int i = tid + threadIdx.x; if(i < colsIn) rowOut[targetColIdx[i]] = rowIn[i]; } } } } void PasteCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) { cudaSetDevice(out->getDevice().no); size_t rows = in->shape().elements() / in->shape().back(); size_t cols = in->shape().back(); size_t colsToCopy = indices.size(); int threads = std::min(MAX_THREADS, (int)colsToCopy); int blocks = std::min(MAX_BLOCKS, (int)rows); size_t* d_indices; CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t))); CUDA_CHECK(cudaMemcpy(d_indices, indices.data(), colsToCopy * sizeof(size_t), cudaMemcpyHostToDevice)); gPasteCols<<<blocks, threads>>>( out->data(), in->data(), rows, cols, d_indices, colsToCopy); CUDA_CHECK(cudaFree(d_indices)); } __global__ void gSelect(float* out, functional::Shape outShape, const float* in, const functional::Shape inShape, int axis, size_t* d_indices) { int length = outShape.elements(); functional::Array<int, functional::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { outShape.dims(index, dims); dims[axis] = d_indices[dims[axis]]; int inIndex = inShape.index(dims); out[index] = in[inIndex]; } } } __global__ void gInsert(float* out, functional::Shape outShape, const float* in, 
const functional::Shape inShape, int axis, size_t* d_indices) { int length = inShape.elements(); functional::Array<int, functional::Shape::size()> dims; for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) { int index = bid + blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { inShape.dims(index, dims); dims[axis] = d_indices[dims[index]]; int outIndex = outShape.index(dims); out[outIndex] = in[index]; } } } void Select(Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices, Ptr<Allocator> allocator) { cudaSetDevice(out->getDevice().no); int length = out->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); CudaCopy(indices.data(), indices.data() + indices.size(), mp_indices->data<size_t>()); int axisGPU = axis + functional::Shape::size() - out->shape().size(); gSelect<<<blocks, threads>>>(out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } void Insert(Tensor out, const Tensor in, int axis, const std::vector<size_t>& indices, Ptr<Allocator> allocator) { cudaSetDevice(in->getDevice().no); int length = in->shape().elements(); int threads = std::min(MAX_THREADS, length); int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0)); auto mp_indices = allocator->alloc<size_t>(indices.size()); CudaCopy(indices.data(), indices.data() + indices.size(), mp_indices->data<size_t>()); int axisGPU = axis + functional::Shape::size() - out->shape().size(); gInsert<<<blocks, threads>>>(out->data(), out->shape(), in->data(), in->shape(), axisGPU, mp_indices->data<size_t>()); allocator->free(mp_indices); } __global__ void gGRUFastForward(float* out, const float* state, const float* xW, const float* sU, const float* b, const float* mask, size_t rows, size_t cols, bool final) { for(int bid = 0; bid < rows; bid += 
// ---- Fused GRU cell forward/backward. Gate layout in xW/sU/b: [r | z | x]
// blocks of `cols` each. NOTE(review): this span opens inside gGRUFastForward's
// outer for-statement, whose head is in the previous span.
gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      // m: per-row mask (1 if the row is active, 0 if padded-out).
      float m = !mask || mask[j];
      float* rowOut = out + j * cols;
      const float* rowState = state + j * cols;
      const float* xWrow = xW + j * cols * 3;
      const float* sUrow = sU + j * cols * 3;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          int k = i + cols;
          float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          int l = i + 2 * cols;
          float h;
          // `final` selects between the two candidate-state formulations
          // (bias inside vs. outside the reset-gate product).
          if(final)
            h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
          else
            h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
          float out = (1.0f - z) * h + z * rowState[i];
          // Masked rows pass the previous state through unchanged.
          rowOut[i] = m * out + (1 - m) * rowState[i];
        }
      }
    }
  }
}

// Host launcher for the GRU forward kernel.
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
  cudaSetDevice(out->getDevice().no);
  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gGRUFastForward<<<blocks, threads>>>(
      out->data(),        // output
      inputs[0]->data(),  // state
      inputs[1]->data(),  // xW
      inputs[2]->data(),  // sU
      inputs[3]->data(),  // b
      inputs.size() > 4 ?
      inputs[4]->data() : 0,  // mask
      rows,
      cols,
      final);
}

// gGRUFastBackward: gradients of the fused GRU cell w.r.t. state, xW, sU, b.
// Each output pointer may be null to skip that gradient; bias grads use
// atomicAdd because all rows accumulate into the same bias vector.
__global__ void gGRUFastBackward(float* outState,
                                 float* outXW,
                                 float* outSU,
                                 float* outB,
                                 const float* state,
                                 const float* xW,
                                 const float* sU,
                                 const float* b,
                                 const float* mask,
                                 const float* adj,
                                 size_t rows,
                                 size_t cols,
                                 bool final) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];
      float* rowOutState = outState + j * cols;
      float* rowOutXW = outXW + j * cols * 3;
      float* rowOutSU = outSU + j * cols * 3;
      const float* rowState = state + j * cols;
      const float* rowXW = xW + j * cols * 3;
      const float* rowSU = sU + j * cols * 3;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + cols;
          int l = i + 2 * cols;
          // Recompute forward gate activations (cheaper than storing them).
          float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
          float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
          float h;
          if(final)
            h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
          else
            h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
          float adj = rowAdj[i];
          float t = (1 - z) * (1 - h * h);
          // df/ds
          if(outState)
            rowOutState[i] += (m * z - m + 1) * adj;
          // df/d(xW_r) ...
          float dfdxW_r = m * r * (1 - r) * t * adj;
          if(final)
            dfdxW_r *= rowSU[l] + b[l];
          else
            dfdxW_r *= rowSU[l];
          if(outXW)
            rowOutXW[i] += dfdxW_r;
          if(outSU)
            rowOutSU[i] += dfdxW_r;
          if(outB)
            atomicAdd(outB + i, dfdxW_r);
          // df/d(xW_z) ...
          float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
          if(outXW)
            rowOutXW[k] += dfdxW_z;
          if(outSU)
            rowOutSU[k] += dfdxW_z;
          if(outB)
            atomicAdd(outB + k, dfdxW_z);
          // df/d(xW_x) ...
          float dfdxW_x = m * t * adj;
          if(outXW)
            rowOutXW[l] += dfdxW_x;
          if(outSU)
            rowOutSU[l] += dfdxW_x * r;
          if(outB)
            if(final)
              atomicAdd(outB + l, dfdxW_x * r);
            else
              atomicAdd(outB + l, dfdxW_x);
        }
      }
    }
  }
}

// Host launcher for the GRU backward kernel; null outputs skip that gradient.
void GRUFastBackward(std::vector<Tensor> outputs,
                     std::vector<Tensor> inputs,
                     Tensor adj,
                     bool final) {
  cudaSetDevice(adj->getDevice().no);
  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gGRUFastBackward<<<blocks, threads>>>(
      outputs[0] ? outputs[0]->data() : 0,  // state - adj
      outputs[1] ? outputs[1]->data() : 0,  // xW - adj
      outputs[2] ? outputs[2]->data() : 0,  // sU - adj
      outputs[3] ? outputs[3]->data() : 0,  // b - adj
      inputs[0]->data(),  // state
      inputs[1]->data(),  // xW
      inputs[2]->data(),  // sU
      inputs[3]->data(),  // b
      inputs.size() > 4 ?
      inputs[4]->data() : 0,  // mask
      adj->data(),
      rows,
      cols,
      final);
}

// gCrossEntropyPick: per row, out[j] = log(sum(exp(x - max))) - x[pick[j]] + max,
// i.e. the cross-entropy of the picked class (body continues past this span).
__global__ void gCrossEntropyPick(float* out,
                                  const functional::Shape outShape,
                                  const float* in,
                                  const functional::Shape inShape,
                                  const float* pick) {
  int rows = inShape.elements() / inShape.back();
  int cols = inShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* sp = in + j * cols;
      extern __shared__ float _share[];
      // Row max for numerical stability; initialized from sp[threadIdx.x],
      // so the loop below may start at tid = 1.
      float* _max = _share + blockDim.x;
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 1; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
// NOTE(review): this span opens inside gCrossEntropyPick's exp-sum loop.
if(id < cols) {
          _sum[threadIdx.x] += __expf(sp[id] - max);
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // cross-entropy
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        // Only the thread holding the picked column writes the row's loss.
        if(id == (int)pick[j]) {
          out[j] = __logf(_sum[0]) - sp[id] + max;
        }
      }
    }
  }
}

// Host launcher for gCrossEntropyPick.
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
  cudaSetDevice(out->getDevice().no);
  int rows = in->shape().elements() / in->shape().back();
  int cols = in->shape().back();
  int blocks = std::min(MAX_BLOCKS, (int)rows);
  int threads = std::min(MAX_THREADS, (int)cols);
  int shared = sizeof(float) * threads * 2;
  gCrossEntropyPick<<<blocks, threads, shared>>>(
      out->data(), out->shape(), in->data(), in->shape(), pick->data());
}

// gCrossEntropyPickBackward: out += adj * (softmax(x) - onehot(pick));
// recomputes max and exp-sum per row like the forward pass.
__global__ void gCrossEntropyPickBackward(float* out,
                                          const functional::Shape outShape,
                                          const float* adj,
                                          const float* in,
                                          const float* pick) {
  int rows = outShape.elements() / outShape.back();
  int cols = outShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* sp = in + j * cols;
      float* so = out + j * cols;
      extern __shared__ float _share[];
      float* _max = _share + blockDim.x;
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 1; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = __expf(sp[id] - max);
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // cross-entropy
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          // sub is 1 for the picked class, 0 otherwise.
          float sub = (float)(id == (int)pick[j]);
          so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
        }
      }
    }
  }
}

// Host launcher for gCrossEntropyPickBackward.
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
  cudaSetDevice(out->getDevice().no);
  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, (int)rows);
  int threads = std::min(MAX_THREADS, (int)cols);
  int shared = sizeof(float) * threads * 2;
  gCrossEntropyPickBackward<<<blocks, threads, shared>>>(
      out->data(), out->shape(), adj->data(), a->data(), pick->data());
}

// L2Norm: sqrt(sum(x^2)) of the whole tensor; reduces into a temporary
// device tensor via ReduceAll, then reads element 0 back on the host.
float L2Norm(Tensor in) {
  cudaSetDevice(in->getDevice().no);
  int size = in->shape().elements();
  int threads = std::min(MAX_THREADS, size);
  int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0));
  uint8_t* data;
  cudaMalloc(&data, blocks * sizeof(float));
  Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)),
                            {1, blocks},
                            in->getBackend()));
  using namespace functional;
  ReduceAll(_1 * _1, out, in);
  float dataCpu = sqrtf(out->get(0));
  out.reset();
  cudaFree(data);
  return dataCpu;
}

// gAtt: additive attention energies — out[j] = sum_i va[i] * tanh(ctx + state).
// Row j indexes (batch x time x beam); ctx/state rows are selected by the
// modular index arithmetic below.
__global__ void gAtt(float* out,
                     const float* va,
                     const float* ctx,
                     const float* state,
                     int m,  // total rows (batch x time x beam)
                     int k,  // depth
                     int b,  // batch size
                     int t   // time of ctx
) {
  int rows = m;
  int cols = k;
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* vaRow = va;
      const float* ctxRow = ctx + (j % (b * t)) * cols;
      const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float z = ctxRow[id] + stateRow[id];
          float ex = tanhf(z) * vaRow[id];
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      out[j] = _sum[0];
      __syncthreads();
    }
  }
}

// Host launcher for gAtt; shapes read from the context tensor.
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
  cudaSetDevice(out->getDevice().no);
  size_t m = out->shape().elements() / out->shape().back();
  size_t k = context->shape()[-1];
  size_t b = context->shape()[-2];
  size_t t = context->shape()[-3];
  int blocks = std::min(MAX_BLOCKS, (int)m);
  int threads = std::min(MAX_THREADS, (int)k);
  int shared = sizeof(float) * threads * 2;
  gAtt<<<blocks, threads, shared>>>(
      out->data(), va->data(), context->data(), state->data(), m, k, b, t);
}

// gAttBack: gradients of gAtt w.r.t. va, context and state; gVa is shared
// across rows, hence atomicAdd.
__global__ void gAttBack(float* gVa,
                         float* gContext,
                         float* gState,
                         const float* va,
                         const float* context,
                         const float* state,
                         const float* adj,
                         int m,  // rows
                         int k,  // cols
                         int n   // batch size
) {
  int rows = m;
  int cols = k;
  for(int bid = 0; bid < m; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* gcRow = gContext + j * cols;
      float* gsRow = gState + (j % n) * cols;
      const float* cRow = context + j * cols;
      const float* sRow = state + (j % n) * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float z = cRow[id] + sRow[id];
          float t = tanhf(z);
          float r = va[id] * (1.f - t * t);
          gcRow[id] += r * adj[j];
          gsRow[id] += r * adj[j];
          atomicAdd(gVa + id, t * adj[j]);
        }
      }
    }
  }
}

// Host launcher for gAttBack (launch continues past this span).
void AttBack(Tensor gVa,
             Tensor gContext,
             Tensor gState,
             Tensor va,
             Tensor context,
             Tensor state,
             Tensor adj) {
  cudaSetDevice(adj->getDevice().no);
  size_t m = adj->shape().elements() / adj->shape()[-1];
  size_t k = context->shape()[-1];
  size_t n = context->shape()[-2];
  int blocks =
// NOTE(review): this span opens inside AttBack's launch-config computation.
std::min(MAX_BLOCKS, (int)n);
  int threads = std::min(MAX_THREADS, (int)k);
  gAttBack<<<blocks, threads>>>(gVa->data(),
                                gContext->data(),
                                gState->data(),
                                va->data(),
                                context->data(),
                                state->data(),
                                adj->data(),
                                m,
                                k,
                                n);
}

// gLNormalization: row-wise layer norm — out = alpha * (x - mean) / sigma
// (+ beta if given); mean and variance are tree-reduced in shared memory.
// Note: _sum and _sqSum alias the same shared buffer; the mean is read into
// a register before _sqSum reuses it.
__global__ void gLNormalization(float* out,
                                const float* in,
                                const float* alpha,
                                const float* beta,
                                int rows,
                                int cols,
                                float eps = 1e-9) {
  extern __shared__ float _share[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      // Pass 1: row mean.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0f;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += sp[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = _sum[0] / cols;
      __syncthreads();
      // Pass 2: row variance.
      float* _sqSum = _share + blockDim.x;
      _sqSum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = sp[id] - mean;
          _sqSum[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float sigma = sqrtf(eps + (_sqSum[0] / cols));
      __syncthreads();
      // Pass 3: normalize, scale, shift.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float t = alpha[id] * ((sp[id] - mean) / sigma);
          if(beta != nullptr)
            t += beta[id];
          so[id] = t;
        }
      }
    }
  }
}

// Host launcher for gLNormalization; beta is optional.
void LayerNormalization(Tensor out,
                        Tensor in,
                        Tensor gamma,
                        Tensor beta,
                        float eps) {
  cudaSetDevice(out->getDevice().no);
  int rows = in->shape().elements() / in->shape().back();
  int cols = in->shape().back();
  int blocks = std::min(MAX_BLOCKS, (int)rows);
  int threads = std::min(MAX_THREADS, (int)cols);
  int shared = 2 * threads * sizeof(float);
  gLNormalization<<<blocks, threads, shared>>>(out->data(),
                                               in->data(),
                                               gamma->data(),
                                               beta ? beta->data() : nullptr,
                                               rows,
                                               cols,
                                               eps);
}

// gLayerNormalizationGrad: backward of layer norm. Reconstructs x_hat from the
// forward output y (x_hat = (y - beta) / gamma), reduces sum(adj),
// sum(adj * x_hat), sum(x), sum((x - mean)^2) per row, then applies the
// standard layer-norm gradient; gamma/beta grads accumulate via atomicAdd.
// The per-element gradient is clipped to |valX| <= 1000 before accumulation.
__global__ void gLayerNormalizationGrad(float* gradX,
                                        float* gradGamma,
                                        float* gradBeta,
                                        float* adj,
                                        float* y,
                                        float* x,
                                        float* gamma,
                                        float* beta,
                                        int rows,
                                        int cols,
                                        float eps = 1e-9) {
  extern __shared__ float shared[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      // Four reduction buffers share one dynamic shared-memory allocation.
      float* sum_adj = shared;
      float* sum_adj_x = shared + blockDim.x;
      float* sum_x = shared + 2 * blockDim.x;
      float* sum_sqr = shared + 3 * blockDim.x;
      const float* xRow = x + j * cols;
      const float* yRow = y + j * cols;
      const float* adjRow = adj + j * cols;
      float* gradXRow = gradX + j * cols;
      sum_x[threadIdx.x] = 0.0f;
      sum_adj[threadIdx.x] = 0.0f;
      sum_adj_x[threadIdx.x] = 0.0f;
      sum_sqr[threadIdx.x] = 0.0f;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          sum_x[threadIdx.x] += xRow[id];
          sum_adj_x[threadIdx.x]
              += adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          sum_adj[threadIdx.x] += adjRow[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
          sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
          sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = sum_x[0] / cols;
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = xRow[id] - mean;
          sum_sqr[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float sigma = sqrtf(eps + (sum_sqr[0] / cols));
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float grad_x = 0.0f;
          float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          grad_x += cols * adjRow[id];
          grad_x -= sum_adj[0];
          grad_x -= sum_adj_x[0] * x_hat;
          grad_x /= (cols * sigma);
          float valX = gamma[id] * grad_x;
          // Clip the gradient magnitude to 1000 to avoid blow-ups.
          float sign = (0.f < valX) - (valX < 0.f);
          valX = fabs(valX) > 1000 ? sign * 1000 : valX;
          gradXRow[id] += valX;
          atomicAdd(gradGamma + id, adjRow[id] * x_hat);
          if(beta) {
            atomicAdd(gradBeta + id, adjRow[id]);
          }
        }
      }
    }
  }
}

// Host launcher for gLayerNormalizationGrad; gradBeta/beta are optional.
void LayerNormalizationGrad(Tensor gradX,
                            Tensor gradGamma,
                            Tensor gradBeta,
                            Tensor adj,
                            Tensor y,
                            Tensor x,
                            Tensor gamma,
                            Tensor beta,
                            float eps) {
  cudaSetDevice(adj->getDevice().no);
  int rows = y->shape().elements() / y->shape()[-1];
  int cols = y->shape()[-1];
  int threads = std::min(MAX_THREADS, cols);
  int blocks = std::min(MAX_BLOCKS, rows);
  int shared = sizeof(float) * threads * 4;
  gLayerNormalizationGrad<<<blocks, threads, shared>>>(
      gradX->data(),
      gradGamma->data(),
      (gradBeta) ? gradBeta->data() : nullptr,
      adj->data(),
      y->data(),
      x->data(),
      gamma->data(),
      (beta) ? beta->data() : nullptr,
      rows,
      cols,
      eps);
}

// gShift: out[i] = in[i - offset], zero where the shifted index falls outside
// [0, length) — a flat-index shift over the whole tensor.
__global__ void gShift(float* out, const float* in, int length, int offset) {
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      if(index - offset < 0 || index - offset >= length)
        out[index] = 0;
      else
        out[index] = in[index - offset];
    }
  }
}

// Host wrapper: converts a per-axis shift into a single flat offset using the
// input's strides; `invert` negates the shift direction.
void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) {
  ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
  int offset = 0;
  for(int i = 0; i < shift.size(); ++i)
    offset += in->shape().stride(i) * shift[i];
  if(invert)
    offset = -offset;
  cudaSetDevice(out->getDevice().no);
  int length = out->shape().elements();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  gShift<<<blocks, threads>>>(out->data(), in->data(), length, offset);
}

// gSetSparse: out[indices[i]] = values[i] for a sparse list of positions.
__global__ void gSetSparse(float* out,
                           const size_t* indices,
                           const float* values,
                           int length) {
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      out[indices[index]] = values[index];
    }
  }
}

// Host wrapper: uploads indices and values to temporary device buffers,
// scatters into `out` (a raw device pointer), then frees the buffers.
void SetSparse(float* out,
               const std::vector<size_t>& indices,
               const std::vector<float>& values) {
  int length = indices.size();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  size_t* d_indices;
  CUDA_CHECK(cudaMalloc(&d_indices, length * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(d_indices,
                        indices.data(),
                        length * sizeof(size_t),
                        cudaMemcpyHostToDevice));
  float* d_values;
  CUDA_CHECK(cudaMemcpy(
      d_values, values.data(), length * sizeof(float), cudaMemcpyHostToDevice));
  gSetSparse<<<blocks, threads>>>(out, d_indices, d_values, length);
  cudaFree(d_indices);
  cudaFree(d_values);
}
/******************************************************************************/

// ---- Fused LSTM cell forward/backward. Gate layout in xW/sU/b: four blocks
// of `cols` each: [forget | input | cell-candidate | output].

// gLSTMCellForward: new cell state c' = gf * c + gi * gc, with per-row mask
// (masked rows keep the previous cell state).
__global__ void gLSTMCellForward(float* out,
                                 const float* cell,
                                 const float* xW,
                                 const float* sU,
                                 const float* b,
                                 const float* mask,
                                 size_t rows,
                                 size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];
      float* rowOut = out + j * cols;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          int k = i + cols;
          float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          int l = i + 2 * cols;
          float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
          float cout = gf * rowCell[i] + gi * gc;
          rowOut[i] = m * cout + (1 - m) * rowCell[i];
        }
      }
    }
  }
}

// Host launcher for gLSTMCellForward.
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
  cudaSetDevice(out->getDevice().no);
  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gLSTMCellForward<<<blocks, threads>>>(
      out->data(),        // output
      inputs[0]->data(),  // cell state
      inputs[1]->data(),  // xW
      inputs[2]->data(),  // sU
      inputs[3]->data(),  // b
      inputs.size() > 4 ?
      inputs[4]->data() : 0,  // mask
      rows,
      cols);
}

// gLSTMOutputForward: hidden output h = go * tanh(c), where go is the output
// gate (fourth gate block).
__global__ void gLSTMOutputForward(float* out,
                                   const float* cell,
                                   const float* xW,
                                   const float* sU,
                                   const float* b,
                                   size_t rows,
                                   size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* rowOut = out + j * cols;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + 3 * cols;
          float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          rowOut[i] = go * tanhf(rowCell[i]);
        }
      }
    }
  }
}

// Host launcher for gLSTMOutputForward.
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
  cudaSetDevice(out->getDevice().no);
  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gLSTMOutputForward<<<blocks, threads>>>(out->data(),        // output
                                          inputs[0]->data(),  // cell state
                                          inputs[1]->data(),  // xW
                                          inputs[2]->data(),  // sU
                                          inputs[3]->data(),  // b
                                          rows,
                                          cols);
}

// gLSTMCellBackward: gradients of gLSTMCellForward w.r.t. cell, xW, sU, b.
// Output pointers may be null to skip a gradient; bias grads use atomicAdd.
__global__ void gLSTMCellBackward(float* outCell,
                                  float* outXW,
                                  float* outSU,
                                  float* outB,
                                  const float* cell,
                                  const float* xW,
                                  const float* sU,
                                  const float* b,
                                  const float* mask,
                                  const float* adj,
                                  size_t rows,
                                  size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          // Recompute forward gate activations.
          float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          int k = i + cols;
          float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          int l = i + 2 * cols;
          float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
          float adj = rowAdj[i];
          // dc/dc_{t-1}
          if(outCell)
            rowOutCell[i] += (m * gf - m + 1) * adj;
          // dc/d(b_f) = dc/d(xW_f) ...
          float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
          if(outXW)
            rowOutXW[i] += dcdxf;
          if(outSU)
            rowOutSU[i] += dcdxf;
          if(outB)
            atomicAdd(outB + i, dcdxf);
          // dc/d(b_i) ...
          float dcdb_i = m * gc * gi * (1 - gi) * adj;
          if(outXW)
            rowOutXW[k] += dcdb_i;
          if(outSU)
            rowOutSU[k] += dcdb_i;
          if(outB)
            atomicAdd(outB + k, dcdb_i);
          // dc/d(b_c) ...
          float dcdxc = m * gi * (1 - gc * gc) * adj;
          if(outXW)
            rowOutXW[l] += dcdxc;
          if(outSU)
            rowOutSU[l] += dcdxc;
          if(outB)
            atomicAdd(outB + l, dcdxc);
        }
      }
    }
  }
}

// Host launcher for gLSTMCellBackward; null outputs skip that gradient.
void LSTMCellBackward(std::vector<Tensor> outputs,
                      std::vector<Tensor> inputs,
                      Tensor adj) {
  cudaSetDevice(adj->getDevice().no);
  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gLSTMCellBackward<<<blocks, threads>>>(
      outputs[0] ? outputs[0]->data() : 0,  // state - adj
      outputs[1] ? outputs[1]->data() : 0,  // xW - adj
      outputs[2] ? outputs[2]->data() : 0,  // sU - adj
      outputs[3] ? outputs[3]->data() : 0,  // b - adj
      inputs[0]->data(),  // state
      inputs[1]->data(),  // xW
      inputs[2]->data(),  // sU
      inputs[3]->data(),  // b
      inputs.size() > 4 ?
      inputs[4]->data() : 0,  // mask
      adj->data(),
      rows,
      cols);
}

// gLSTMOutputBackward: gradients of gLSTMOutputForward (h = go * tanh(c))
// w.r.t. cell, xW, sU, b (output-gate block only).
__global__ void gLSTMOutputBackward(float* outCell,
                                    float* outXW,
                                    float* outSU,
                                    float* outB,
                                    const float* cell,
                                    const float* xW,
                                    const float* sU,
                                    const float* b,
                                    const float* adj,
                                    size_t rows,
                                    size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + 3 * cols;
          float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          float t = tanhf(rowCell[i]);
          float adj = rowAdj[i];
          // dc/dc_{t-1}
          if(outCell)
            rowOutCell[i] += go * (1 - t * t) * adj;
          // dc/d(b_o) = dc/d(xW_f) ...
          float dcdxo = t * go * (1 - go) * adj;
          if(outXW)
            rowOutXW[k] += dcdxo;
          if(outSU)
            rowOutSU[k] += dcdxo;
          if(outB)
            atomicAdd(outB + k, dcdxo);
        }
      }
    }
  }
}

// Host launcher for gLSTMOutputBackward (argument list continues past this span).
void LSTMOutputBackward(std::vector<Tensor> outputs,
                        std::vector<Tensor> inputs,
                        Tensor adj) {
  cudaSetDevice(adj->getDevice().no);
  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();
  int blocks = std::min(MAX_BLOCKS, rows);
  int threads = std::min(MAX_THREADS, cols);
  gLSTMOutputBackward<<<blocks, threads>>>(
      outputs[0] ? outputs[0]->data() : 0,  // state - adj
      outputs[1] ? outputs[1]->data() : 0,  // xW - adj
      outputs[2] ? outputs[2]->data() : 0,  // sU - adj
      outputs[3] ?
// NOTE(review): this span opens inside LSTMOutputBackward's launch arguments.
outputs[3]->data() : 0,  // b - adj
      inputs[0]->data(),  // state
      inputs[1]->data(),  // xW
      inputs[2]->data(),  // sU
      inputs[3]->data(),  // b
      adj->data(),
      rows,
      cols);
}

// gHighwayForward: out = in1 * sigma(t) + in2 * (1 - sigma(t)), elementwise.
__global__ void gHighwayForward(float* out,
                                const float* in1,
                                const float* in2,
                                const float* t,
                                size_t length) {
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      float sigma = stableLogit(t[index]);
      out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
    }
  }
}

// Host launcher for gHighwayForward.
void HighwayForward(Tensor out,
                    const Tensor in1,
                    const Tensor in2,
                    const Tensor t) {
  cudaSetDevice(out->getDevice().no);
  int length = out->shape().elements();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  gHighwayForward<<<blocks, threads>>>(
      out->data(), in1->data(), in2->data(), t->data(), length);
}

// gHighwayBackward: gradients of the highway mix w.r.t. in1, in2 and t
// (note: assigns, does not accumulate).
__global__ void gHighwayBackward(float* out1,
                                 float* out2,
                                 float* outt,
                                 const float* in1,
                                 const float* in2,
                                 const float* t,
                                 const float* adj,
                                 size_t length) {
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      float sigma = stableLogit(t[index]);
      out1[index] = sigma * adj[index];
      out2[index] = (1.f - sigma) * adj[index];
      outt[index]
          = sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
    }
  }
}

// Host launcher for gHighwayBackward.
void HighwayBackward(Tensor out1,
                     Tensor out2,
                     Tensor outt,
                     const Tensor in1,
                     const Tensor in2,
                     const Tensor t,
                     const Tensor adj) {
  cudaSetDevice(out1->getDevice().no);
  int length = out1->shape().elements();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  gHighwayBackward<<<blocks, threads>>>(out1->data(),
                                        out2->data(),
                                        outt->data(),
                                        in1->data(),
                                        in2->data(),
                                        t->data(),
                                        adj->data(),
                                        length);
}

// gMaxPoolingForward: one thread per output element; each takes the max of a
// masked window of `width` input elements (the last window uses lastWidth).
__global__ void gMaxPoolingForward(float* out,
                                   int outRows,
                                   int outCols,
                                   float* in,
                                   int inRows,
                                   int inCols,
                                   float* mask,
                                   int numKernels,
                                   int maskCols,
                                   int width,
                                   int lastWidth) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= outRows * outCols)
    return;
  int rowId = tid / outRows;
  int colId = tid % outRows;
  float* b = in + (rowId * inCols) + (colId * width);
  float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
  // The final window may be narrower than `width`.
  if(colId == outRows - 1) {
    width = lastWidth;
  }
  float currentMax = b[0] * localMask[0];
  for(int i = 1; i < width; ++i) {
    if(b[i] * localMask[i] > currentMax) {
      currentMax = b[i] * localMask[i];
    }
  }
  out[rowId + (colId * outCols)] = currentMax;
}

// Host launcher for gMaxPoolingForward; `isEven` trims one element before
// computing the last (possibly partial) window width.
void PoolingWithMaskingForward(Tensor out,
                               Tensor in,
                               Tensor mask,
                               int width,
                               bool isEven) {
  int n = out->shape().elements();
  int threads = std::min(n, MAX_THREADS);
  int blocks = n / threads + (n % threads != 0);
  auto& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];
  auto& outShape = out->shape();
  int outRows = outShape[2];
  int outCols = outShape[0] * outShape[1];
  int lastWidth
      = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
  gMaxPoolingForward<<<blocks, threads>>>(out->data(),
                                          outRows,
                                          outCols,
                                          in->data(),
                                          inRows,
                                          inCols,
                                          mask->data(),
                                          outShape[1],
                                          mask->shape()[2],
                                          width,
                                          lastWidth);
}

// gMaxPoolingBackward: re-finds the argmax of each masked window and routes
// the adjoint to that input position.
__global__ void gMaxPoolingBackward(float* adj,
                                    int adjRows,
                                    int adjCols,
                                    float* in,
                                    float* adjIn,
                                    int inRows,
                                    int inCols,
                                    float* mask,
                                    int numKernels,
                                    int maskCols,
                                    int width,
                                    int lastWidth) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if(tid >= adjRows * adjCols)
    return;
  int rowId = tid / adjRows;
  int colId = tid % adjRows;
  float* b = in + (rowId * inCols) + (colId * width);
  if(colId == adjRows - 1) {
    width = lastWidth;
  }
  float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
  size_t currentMaxIdx = 0;
  for(int i = 1; i < width; ++i) {
    if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
      currentMaxIdx = i;
    }
  }
  adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
      += adj[rowId + (colId * adjCols)];
}

// Host launcher for gMaxPoolingBackward; mirrors the forward launch shape.
void PoolingWithMaskingBackward(Tensor adj,
                                Tensor adjIn,
                                Tensor in,
                                Tensor mask,
                                int width,
                                bool isEven) {
  int n = adj->shape().elements();
  int threads = std::min(n, 512);
  int blocks = n / threads + (n % threads != 0);
  auto& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];
  auto& adjShape = adj->shape();
  int adjRows = adjShape[2];
  int adjCols = adjShape[0] * adjShape[1];
  int lastWidth
      = ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
  gMaxPoolingBackward<<<blocks, threads>>>(adj->data(),
                                           adjRows,
                                           adjCols,
                                           in->data(),
                                           adjIn->data(),
                                           inRows,
                                           inCols,
                                           mask->data(),
                                           adjShape[1],
                                           mask->shape()[2],
                                           width,
                                           lastWidth);
}
}
}  // namespace marian
e65afe6a1653641db320af710e778fc66745e810.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 100 __global__ void assign(int *arr) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N && tid > 0) { for (int i = 0; i < 50; i++) { int tmp = arr[tid-1]; __syncthreads(); arr[tid] = tmp; //arr[tid] = arr[tid-1]; //false operation } } } int main() { int arr[N] = {0}; for (int i = 0; i < N; i++) arr[i] = i; int b[N] = {0}; int *dev_arr; hipMalloc(&dev_arr, N * sizeof(int)); hipMemcpy(dev_arr, arr, N * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( assign), dim3(16), dim3(16), 0, 0, dev_arr); hipMemcpy(b, dev_arr, N * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_arr); for (int i = 0; i < N; i++) { printf("%d ", b[i]); } printf("\n"); }
e65afe6a1653641db320af710e778fc66745e810.cu
#include <stdio.h> #define N 100 __global__ void assign(int *arr) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N && tid > 0) { for (int i = 0; i < 50; i++) { int tmp = arr[tid-1]; __syncthreads(); arr[tid] = tmp; //arr[tid] = arr[tid-1]; //false operation } } } int main() { int arr[N] = {0}; for (int i = 0; i < N; i++) arr[i] = i; int b[N] = {0}; int *dev_arr; cudaMalloc(&dev_arr, N * sizeof(int)); cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice); assign<<<16, 16>>>(dev_arr); cudaMemcpy(b, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_arr); for (int i = 0; i < N; i++) { printf("%d ", b[i]); } printf("\n"); }
279c9b7d244eaf19e98fbef2c5dea2e28db9022e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/aggregate.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> #include <pbrt/wavefront/intersect.h> // Make various functions visible to OptiX, which doesn't get to link // shader code with the CUDA code in the main executable... #include <pbrt/util/color.cpp> #include <pbrt/util/colorspace.cpp> #include <pbrt/util/log.cpp> #include <pbrt/util/noise.cpp> #include <pbrt/util/spectrum.cpp> #include <pbrt/util/transform.cpp> #include <optix_device.h> #include <utility> using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... 
payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { ClosestHitContext() = default; PBRT_GPU ClosestHitContext(Medium rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} Medium rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; Material material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) EnqueueWorkAfterMiss(r, params.mediumSampleQueue, params.escapedRayQueue); } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; Medium rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; 
getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; EnqueueWorkAfterIntersection(r, rayMedium, optixGetRayTmax(), intr, params.mediumSampleQueue, params.nextRayQueue, params.hitAreaLightQueue, params.basicEvalMaterialQueue, params.universalEvalMaterialQueue); } static __forceinline__ __device__ Transform getWorldFromInstance() { assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); return Transform(worldFromObjM, objFromWorldM); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ SurfaceInteraction getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Transform worldFromInstance = getWorldFromInstance(); Float time = optixGetRayTime(); wo = worldFromInstance.ApplyInverse(wo); TriangleIntersection ti{b0, b1, b2, optixGetRayTmax()}; SurfaceInteraction intr = 
Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), ti, time, wo); return worldFromInstance(intr); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; SurfaceInteraction intr = getTriangleIntersection(); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha >= 1) return false; if (alpha <= 0) return true; else { float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = HashFloat(o, d); return u > alpha; } } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); SurfaceInteraction intr = getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } /////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; PBRT_DBG("Tracing shadow ray index %d o %f %f %f d %f %f %f\n", index, sr.ray.o.x, sr.ray.o.y, sr.ray.o.z, sr.ray.d.x, sr.ray.d.y, sr.ray.d.z); uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); RecordShadowRayResult(sr, &params.pixelSampleState, 
!missed); } extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); } extern "C" __global__ void __raygen__shadow_Tr() { PBRT_DBG("raygen shadow tr %d\n", optixGetLaunchIndex().x); int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; ClosestHitContext ctx; TraceTransmittance(sr, &params.pixelSampleState, [&](Ray ray, Float tMax) -> TransmittanceTraceResult { ctx = ClosestHitContext(ray.medium, true); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); return TransmittanceTraceResult{!missed, Point3f(ctx.piHit), ctx.material}; }, [&](Point3f p) -> Ray { return ctx.SpawnRayTo(p); }); } extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); } ///////////////////////////////////////////////////////////////////////////////////// // Quadrics static __device__ inline SurfaceInteraction getQuadricIntersection( const QuadricIntersection &si) { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Float time = optixGetRayTime(); SurfaceInteraction intr; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) intr = sphere->InteractionFromIntersection(si, wo, time); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) intr = cylinder->InteractionFromIntersection(si, wo, time); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) intr = disk->InteractionFromIntersection(si, wo, time); else assert(!"unexpected quadric"); return intr; } extern "C" __global__ void __closesthit__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = 
BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (rec.areaLight) intr.areaLight = rec.areaLight; Transform worldFromInstance = getWorldFromInstance(); intr = worldFromInstance(intr); ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowQuadric() { } extern "C" __global__ void __intersection__quadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); pstd::optional<QuadricIntersection> isect; if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>()) isect = sphere->BasicIntersect(ray, tMax); else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>()) isect = cylinder->BasicIntersect(ray, tMax); else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>()) isect = disk->BasicIntersect(ray, tMax); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getQuadricIntersection(*isect); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = HashFloat(o.x, o.y, o.z, d.x, d.y, d.z); if (u > alpha) // no hit return; } } optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x), FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z), FloatToBits(isect->phi)); } /////////////////////////////////////////////////////////////////////////// // Bilinear patches static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 rd = 
optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), uv, optixGetRayTime(), wo); } extern "C" __global__ void __closesthit__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; Transform worldFromInstance = getWorldFromInstance(); intr = worldFromInstance(intr); ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__shadowBilinearPatch() { } extern "C" __global__ void __intersection__bilinearPatch() { BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer()); float3 org = optixGetObjectRayOrigin(); float3 dir = optixGetObjectRayDirection(); Float tMax = optixGetRayTmax(); Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z)); int vertexIndex = 4 * optixGetPrimitiveIndex(); Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]]; Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]]; Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]]; Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]]; pstd::optional<BilinearIntersection> isect = IntersectBilinearPatch(ray, tMax, p00, p10, p01, p11); if (!isect) return; if (rec.alphaTexture) { SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha < 1) { if (alpha == 0) // No hit return; float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = HashFloat(o, d); if (u > alpha) // no hit return; 
} } optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]), FloatToBits(isect->uv[1])); } /////////////////////////////////////////////////////////////////////////// // Random hit (for subsurface scattering) struct RandomHitPayload { WeightedReservoirSampler<SubsurfaceInteraction> wrs; Material material; pstd::optional<SurfaceInteraction> intr; }; extern "C" __global__ void __raygen__randomHit() { // Keep as uint32_t so can pass directly to optixTrace. uint32_t index = optixGetLaunchIndex().x; if (index >= params.subsurfaceScatterQueue->Size()) return; SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index]; Ray ray(s.p0, s.p1 - s.p0); RandomHitPayload payload; payload.wrs.Seed(Hash(s.p0, s.p1)); payload.material = s.material; uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload); PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z); while (true) { Trace(params.traversable, ray, 0.f /* tMin */, 1.f /* tMax */, OPTIX_RAY_FLAG_NONE, ptr0, ptr1); if (payload.intr) { ray = payload.intr->SpawnRayTo(s.p1); payload.intr.reset(); } else break; } if (payload.wrs.HasSample() && payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed... 
const SubsurfaceInteraction &si = payload.wrs.GetSample(); PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x, si.n.y, si.n.z); params.subsurfaceScatterQueue->reservoirPDF[index] = payload.wrs.SampleProbability(); params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample(); } else params.subsurfaceScatterQueue->reservoirPDF[index] = 0; } extern "C" __global__ void __closesthit__randomHitTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); SurfaceInteraction intr = getTriangleIntersection(); p->intr = intr; if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f); } extern "C" __global__ void __closesthit__randomHitBilinearPatch() { BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer(); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1())); SurfaceInteraction intr = getBilinearPatchIntersection(uv); p->intr = intr; if (rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f); } extern "C" __global__ void __closesthit__randomHitQuadric() { QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer()); RandomHitPayload *p = getPayload<RandomHitPayload>(); PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n", rec.material.ptr(), p->material.ptr()); QuadricIntersection qi; qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()), BitsToFloat(optixGetAttribute_2())); qi.phi = BitsToFloat(optixGetAttribute_3()); SurfaceInteraction intr = getQuadricIntersection(qi); p->intr = intr; if 
(rec.material == p->material) p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f); }
279c9b7d244eaf19e98fbef2c5dea2e28db9022e.cu
// pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys. // The pbrt source code is licensed under the Apache License, Version 2.0. // SPDX: Apache-2.0 #include <pbrt/pbrt.h> #include <pbrt/gpu/aggregate.h> #include <pbrt/gpu/optix.h> #include <pbrt/interaction.h> #include <pbrt/materials.h> #include <pbrt/media.h> #include <pbrt/shapes.h> #include <pbrt/textures.h> #include <pbrt/util/float.h> #include <pbrt/util/rng.h> #include <pbrt/util/transform.h> #include <pbrt/util/vecmath.h> #include <pbrt/wavefront/intersect.h> // Make various functions visible to OptiX, which doesn't get to link // shader code with the CUDA code in the main executable... #include <pbrt/util/color.cpp> #include <pbrt/util/colorspace.cpp> #include <pbrt/util/log.cpp> #include <pbrt/util/noise.cpp> #include <pbrt/util/spectrum.cpp> #include <pbrt/util/transform.cpp> #include <optix_device.h> #include <utility> using namespace pbrt; extern "C" { extern __constant__ pbrt::RayIntersectParameters params; } /////////////////////////////////////////////////////////////////////////// // Utility functions // Payload management __device__ inline uint32_t packPointer0(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uptr >> 32; } __device__ inline uint32_t packPointer1(void *ptr) { uint64_t uptr = reinterpret_cast<uint64_t>(ptr); return uint32_t(uptr); } template <typename T> static __forceinline__ __device__ T *getPayload() { uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1(); const uint64_t uptr = (uint64_t(p0) << 32) | p1; return reinterpret_cast<T *>(uptr); } template <typename... Args> __device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin, Float tMax, OptixRayFlags flags, Args &&... 
payload) { optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z), make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time, OptixVisibilityMask(255), flags, 0, /* ray type */ 1, /* number of ray types */ 0, /* missSBTIndex */ std::forward<Args>(payload)...); } /////////////////////////////////////////////////////////////////////////// // Closest hit struct ClosestHitContext { ClosestHitContext() = default; PBRT_GPU ClosestHitContext(Medium rayMedium, bool shadowRay) : rayMedium(rayMedium), shadowRay(shadowRay) {} Medium rayMedium; bool shadowRay; // out Point3fi piHit; Normal3f nHit; Material material; MediumInterface mediumInterface; PBRT_GPU Ray SpawnRayTo(const Point3f &p) const { Interaction intr(piHit, nHit); intr.mediumInterface = &mediumInterface; return intr.SpawnRayTo(p); } }; extern "C" __global__ void __raygen__findClosest() { int rayIndex(optixGetLaunchIndex().x); if (rayIndex >= params.rayQueue->Size()) return; RayWorkItem r = (*params.rayQueue)[rayIndex]; Ray ray = r.ray; Float tMax = 1e30f; ClosestHitContext ctx(ray.medium, false); uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx); PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax); uint32_t missed = 0; Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1, missed); if (missed) EnqueueWorkAfterMiss(r, params.mediumSampleQueue, params.escapedRayQueue); } extern "C" __global__ void __miss__noop() { optixSetPayload_2(1); } static __forceinline__ __device__ void ProcessClosestIntersection( SurfaceInteraction intr) { int rayIndex = optixGetLaunchIndex().x; Medium rayMedium = getPayload<ClosestHitContext>()->rayMedium; if (intr.mediumInterface) getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface; else getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium); getPayload<ClosestHitContext>()->piHit = intr.pi; getPayload<ClosestHitContext>()->nHit = intr.n; 
getPayload<ClosestHitContext>()->material = intr.material; if (getPayload<ClosestHitContext>()->shadowRay) return; // We only have the ray queue (and it only makes sense to access) for // regular closest hit rays. RayWorkItem r = (*params.rayQueue)[rayIndex]; EnqueueWorkAfterIntersection(r, rayMedium, optixGetRayTmax(), intr, params.mediumSampleQueue, params.nextRayQueue, params.hitAreaLightQueue, params.basicEvalMaterialQueue, params.universalEvalMaterialQueue); } static __forceinline__ __device__ Transform getWorldFromInstance() { assert(optixGetTransformListSize() == 1); float worldFromObj[12], objFromWorld[12]; optixGetObjectToWorldTransformMatrix(worldFromObj); optixGetWorldToObjectTransformMatrix(objFromWorld); SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2], worldFromObj[3], worldFromObj[4], worldFromObj[5], worldFromObj[6], worldFromObj[7], worldFromObj[8], worldFromObj[9], worldFromObj[10], worldFromObj[11], 0.f, 0.f, 0.f, 1.f); SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2], objFromWorld[3], objFromWorld[4], objFromWorld[5], objFromWorld[6], objFromWorld[7], objFromWorld[8], objFromWorld[9], objFromWorld[10], objFromWorld[11], 0.f, 0.f, 0.f, 1.f); return Transform(worldFromObjM, objFromWorldM); } /////////////////////////////////////////////////////////////////////////// // Triangles static __forceinline__ __device__ SurfaceInteraction getTriangleIntersection() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); float b1 = optixGetTriangleBarycentrics().x; float b2 = optixGetTriangleBarycentrics().y; float b0 = 1 - b1 - b2; float3 rd = optixGetWorldRayDirection(); Vector3f wo = -Vector3f(rd.x, rd.y, rd.z); Transform worldFromInstance = getWorldFromInstance(); Float time = optixGetRayTime(); wo = worldFromInstance.ApplyInverse(wo); TriangleIntersection ti{b0, b1, b2, optixGetRayTmax()}; SurfaceInteraction intr = 
Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(), ti, time, wo); return worldFromInstance(intr); } static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) { if (!rec.alphaTexture) return false; SurfaceInteraction intr = getTriangleIntersection(); BasicTextureEvaluator eval; Float alpha = eval(rec.alphaTexture, intr); if (alpha >= 1) return false; if (alpha <= 0) return true; else { float3 o = optixGetWorldRayOrigin(); float3 d = optixGetWorldRayDirection(); Float u = HashFloat(o, d); return u > alpha; } } extern "C" __global__ void __closesthit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); SurfaceInteraction intr = getTriangleIntersection(); if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition()) intr.mediumInterface = rec.mediumInterface; intr.material = rec.material; if (!rec.areaLights.empty()) intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()]; ProcessClosestIntersection(intr); } extern "C" __global__ void __anyhit__triangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } extern "C" __global__ void __anyhit__shadowTriangle() { const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer(); if (alphaKilled(rec)) optixIgnoreIntersection(); } /////////////////////////////////////////////////////////////////////////// // Shadow rays extern "C" __global__ void __raygen__shadow() { int index = optixGetLaunchIndex().x; if (index >= params.shadowRayQueue->Size()) return; ShadowRayWorkItem sr = (*params.shadowRayQueue)[index]; PBRT_DBG("Tracing shadow ray index %d o %f %f %f d %f %f %f\n", index, sr.ray.o.x, sr.ray.o.y, sr.ray.o.z, sr.ray.d.x, sr.ray.d.y, sr.ray.d.z); uint32_t missed = 0; Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE, missed); RecordShadowRayResult(sr, &params.pixelSampleState, 
!missed); }  // NOTE(review): tail of a definition that begins before this chunk; left untouched.

// Miss program for basic (binary) shadow rays: writes 1 into payload slot 0,
// recording that the ray reached the light without hitting anything.
extern "C" __global__ void __miss__shadow() { optixSetPayload_0(1); }

// Ray-generation program for shadow rays that need a transmittance estimate.
// Pulls one ShadowRayWorkItem per launch index from the shadow ray queue and
// lets TraceTransmittance() drive repeated segment traces toward the light.
extern "C" __global__ void __raygen__shadow_Tr() {
    PBRT_DBG("raygen shadow tr %d\n", optixGetLaunchIndex().x);
    int index = optixGetLaunchIndex().x;
    if (index >= params.shadowRayQueue->Size())
        return;

    ShadowRayWorkItem sr = (*params.shadowRayQueue)[index];

    ClosestHitContext ctx;
    TraceTransmittance(
        sr, &params.pixelSampleState,
        // Trace one segment: reports whether a surface was hit, the hit point,
        // and its material, via the ClosestHitContext that the closest-hit
        // programs fill in through the packed-pointer payload (p0, p1).
        [&](Ray ray, Float tMax) -> TransmittanceTraceResult {
            ctx = ClosestHitContext(ray.medium, true);
            uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx);
            uint32_t missed = 0;
            Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE,
                  p0, p1, missed);
            return TransmittanceTraceResult{!missed, Point3f(ctx.piHit), ctx.material};
        },
        // Continuation: spawn the next segment from the last hit toward p.
        [&](Point3f p) -> Ray { return ctx.SpawnRayTo(p); });
}

// Miss program for transmittance shadow rays: payload slot 2 = 1 marks a miss.
extern "C" __global__ void __miss__shadow_Tr() { optixSetPayload_2(1); }

/////////////////////////////////////////////////////////////////////////////////////
// Quadrics

// Builds the SurfaceInteraction for a quadric hit by dispatching on the
// concrete shape type (Sphere/Cylinder/Disk) stored in the SBT record.
static __device__ inline SurfaceInteraction getQuadricIntersection(
    const QuadricIntersection &si) {
    QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());

    float3 rd = optixGetWorldRayDirection();
    Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);  // outgoing direction = -ray direction
    Float time = optixGetRayTime();

    SurfaceInteraction intr;
    if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
        intr = sphere->InteractionFromIntersection(si, wo, time);
    else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
        intr = cylinder->InteractionFromIntersection(si, wo, time);
    else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
        intr = disk->InteractionFromIntersection(si, wo, time);
    else
        assert(!"unexpected quadric");

    return intr;
}

// Closest-hit program for quadrics: rebuilds the QuadricIntersection from the
// four attribute registers written by __intersection__quadric, attaches
// material/area-light/medium data from the SBT record, transforms the
// interaction to world space, and hands it to ProcessClosestIntersection().
extern "C" __global__ void __closesthit__quadric() {
    QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());

    QuadricIntersection qi;
    qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()),
                      BitsToFloat(optixGetAttribute_2()));
    qi.phi = BitsToFloat(optixGetAttribute_3());

    SurfaceInteraction intr = getQuadricIntersection(qi);

    if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
        intr.mediumInterface = rec.mediumInterface;
    intr.material = rec.material;
    if (rec.areaLight)
        intr.areaLight = rec.areaLight;

    Transform worldFromInstance = getWorldFromInstance();
    intr = worldFromInstance(intr);

    ProcessClosestIntersection(intr);
}

// Any-hit for shadow rays against quadrics: nothing to do; accepting the hit
// is sufficient for occlusion.
extern "C" __global__ void __anyhit__shadowQuadric() { }

// Custom intersection program for quadrics. On a hit that survives the
// optional alpha test, reports the object-space hit point and phi through the
// attribute registers (consumed by __closesthit__quadric above).
extern "C" __global__ void __intersection__quadric() {
    QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());

    float3 org = optixGetObjectRayOrigin();
    float3 dir = optixGetObjectRayDirection();
    Float tMax = optixGetRayTmax();
    Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));

    pstd::optional<QuadricIntersection> isect;
    if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
        isect = sphere->BasicIntersect(ray, tMax);
    else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
        isect = cylinder->BasicIntersect(ray, tMax);
    else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
        isect = disk->BasicIntersect(ray, tMax);

    if (!isect)
        return;

    if (rec.alphaTexture) {
        SurfaceInteraction intr = getQuadricIntersection(*isect);

        BasicTextureEvaluator eval;
        Float alpha = eval(rec.alphaTexture, intr);
        if (alpha < 1) {
            if (alpha == 0)
                // No hit
                return;

            // Stochastic alpha test: a deterministic hash of the ray keeps the
            // accept/reject decision consistent for this ray.
            float3 o = optixGetWorldRayOrigin();
            float3 d = optixGetWorldRayDirection();
            Float u = HashFloat(o.x, o.y, o.z, d.x, d.y, d.z);
            if (u > alpha)
                // no hit
                return;
        }
    }

    optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x),
                            FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z),
                            FloatToBits(isect->phi));
}

///////////////////////////////////////////////////////////////////////////
// Bilinear patches

// Builds the SurfaceInteraction for a bilinear-patch hit from its (u,v)
// parametric coordinates and the mesh stored in the SBT record.
static __forceinline__ __device__ SurfaceInteraction getBilinearPatchIntersection(Point2f uv) {
    BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());

    float3 rd = optixGetWorldRayDirection();
    Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);  // outgoing direction = -ray direction

    return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(),
                                                      uv, optixGetRayTime(), wo);
}

// Closest-hit program for bilinear patches: reconstructs (u,v) from the
// attribute registers, attaches per-record material/light/medium data,
// transforms to world space, and forwards to ProcessClosestIntersection().
extern "C" __global__ void __closesthit__bilinearPatch() {
    BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());

    Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()));

    SurfaceInteraction intr = getBilinearPatchIntersection(uv);

    if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
        intr.mediumInterface = rec.mediumInterface;
    intr.material = rec.material;
    if (!rec.areaLights.empty())
        intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()];

    Transform worldFromInstance = getWorldFromInstance();
    intr = worldFromInstance(intr);

    ProcessClosestIntersection(intr);
}

// Any-hit for shadow rays against bilinear patches: accepting the hit is
// sufficient; no extra work needed.
extern "C" __global__ void __anyhit__shadowBilinearPatch() { }

// Custom intersection program for bilinear patches: fetches the four corner
// vertices for the primitive, intersects, runs the optional alpha test, and
// reports (u,v) through the attribute registers.
extern "C" __global__ void __intersection__bilinearPatch() {
    BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());

    float3 org = optixGetObjectRayOrigin();
    float3 dir = optixGetObjectRayDirection();
    Float tMax = optixGetRayTmax();
    Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));

    int vertexIndex = 4 * optixGetPrimitiveIndex();  // 4 corners per patch
    Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]];
    Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]];
    Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]];
    Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]];
    pstd::optional<BilinearIntersection> isect =
        IntersectBilinearPatch(ray, tMax, p00, p10, p01, p11);

    if (!isect)
        return;

    if (rec.alphaTexture) {
        SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv);

        BasicTextureEvaluator eval;
        Float alpha = eval(rec.alphaTexture, intr);
        if (alpha < 1) {
            if (alpha == 0)
                // No hit
                return;

            // Stochastic alpha test keyed on the world-space ray.
            float3 o = optixGetWorldRayOrigin();
            float3 d = optixGetWorldRayDirection();
            Float u = HashFloat(o, d);
            if (u > alpha)
                // no hit
                return;
        }
    }

    optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]),
                            FloatToBits(isect->uv[1]));
}

///////////////////////////////////////////////////////////////////////////
// Random hit (for subsurface scattering)

// Per-ray state for random-hit traversal: a weighted reservoir sampler over
// candidate subsurface interactions, the material to match against, and the
// most recent intersection (used to continue the ray past each hit).
struct RandomHitPayload {
    WeightedReservoirSampler<SubsurfaceInteraction> wrs;
    Material material;
    pstd::optional<SurfaceInteraction> intr;
};

// Ray-generation program for subsurface-scattering probe rays: repeatedly
// traces the segment s.p0 -> s.p1, reservoir-sampling among all surfaces with
// the matching material that the segment crosses, then writes the chosen
// sample and its probability back to the subsurface scatter queue.
extern "C" __global__ void __raygen__randomHit() {
    // Keep as uint32_t so can pass directly to optixTrace.
    uint32_t index = optixGetLaunchIndex().x;
    if (index >= params.subsurfaceScatterQueue->Size())
        return;

    SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index];

    Ray ray(s.p0, s.p1 - s.p0);

    RandomHitPayload payload;
    payload.wrs.Seed(Hash(s.p0, s.p1));
    payload.material = s.material;

    uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload);

    PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f\n",
             ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z);

    // March along the segment: each trace stops at the next closest hit,
    // which the closest-hit programs record in payload.intr; continue from
    // there toward s.p1 until nothing further is hit.
    while (true) {
        Trace(params.traversable, ray, 0.f /* tMin */, 1.f /* tMax */,
              OPTIX_RAY_FLAG_NONE, ptr0, ptr1);

        if (payload.intr) {
            ray = payload.intr->SpawnRayTo(s.p1);
            payload.intr.reset();
        } else
            break;
    }

    if (payload.wrs.HasSample() &&
        payload.wrs.WeightSum() > 0) {  // TODO: latter check shouldn't be needed...
        const SubsurfaceInteraction &si = payload.wrs.GetSample();
        PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z,
                 si.n.x, si.n.y, si.n.z);

        params.subsurfaceScatterQueue->reservoirPDF[index] = payload.wrs.SampleProbability();
        params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample();
    } else
        params.subsurfaceScatterQueue->reservoirPDF[index] = 0;
}

// Closest-hit for random-hit rays against triangles: records the intersection
// so the raygen loop can continue past it, and offers it to the reservoir
// sampler when the hit surface carries the target material.
extern "C" __global__ void __closesthit__randomHitTriangle() {
    const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();

    RandomHitPayload *p = getPayload<RandomHitPayload>();

    PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n",
             rec.material.ptr(), p->material.ptr());

    SurfaceInteraction intr = getTriangleIntersection();
    p->intr = intr;
    if (rec.material == p->material)
        p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f);
}

// Closest-hit for random-hit rays against bilinear patches; same logic as the
// triangle variant with (u,v) recovered from the attribute registers.
extern "C" __global__ void __closesthit__randomHitBilinearPatch() {
    BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer();

    RandomHitPayload *p = getPayload<RandomHitPayload>();

    PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n",
             rec.material.ptr(), p->material.ptr());

    Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()));
    SurfaceInteraction intr = getBilinearPatchIntersection(uv);
    p->intr = intr;
    if (rec.material == p->material)
        p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f);
}

// Closest-hit for random-hit rays against quadrics; same logic with the
// QuadricIntersection recovered from the attribute registers.
extern "C" __global__ void __closesthit__randomHitQuadric() {
    QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());

    RandomHitPayload *p = getPayload<RandomHitPayload>();

    PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n",
             rec.material.ptr(), p->material.ptr());

    QuadricIntersection qi;
    qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()),
                      BitsToFloat(optixGetAttribute_2()));
    qi.phi = BitsToFloat(optixGetAttribute_3());

    SurfaceInteraction intr = getQuadricIntersection(qi);
    p->intr = intr;
    if (rec.material == p->material)
        p->wrs.Add([&] PBRT_CPU_GPU() { return intr; }, 1.f);
}
424a7abe5d67f8a15b61c9775cb0db92ff5e907e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Felipe Aramburu <felipe@blazingdb.com> * Copyright 2018 Alexander Ocsa <alexander@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <vector> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> #include <thrust/iterator/transform_iterator.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.hpp" #include "rmm/thrust_rmm_allocator.h" //std lib #include <map> //wow the freaking example from iterator_adaptpr, what a break right! 
template<typename Iterator> class repeat_iterator : public thrust::iterator_adaptor< repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating Iterator // the second template parameter is the name of the iterator we're adapting // we can use the default for the additional template parameters > { public: // shorthand for the name of the iterator_adaptor we're deriving from typedef thrust::iterator_adaptor< repeat_iterator<Iterator>, Iterator > super_t; __host__ __device__ repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {} // befriend thrust::iterator_core_access to allow it access to the private interface below friend class thrust::iterator_core_access; private: // repeat each element of the adapted range n times unsigned int n; // used to keep track of where we began const Iterator begin; // it is private because only thrust::iterator_core_access needs access to it __host__ __device__ typename super_t::reference dereference() const { return *(begin + (this->base() - begin) / n); } }; typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator; gdf_size_type get_number_of_bytes_for_valid (gdf_size_type column_size) { return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE; } // note: functor inherits from unary_function struct modulus_bit_width : public thrust::unary_function<gdf_size_type,gdf_size_type> { gdf_size_type n_bytes; gdf_size_type column_size; modulus_bit_width (gdf_size_type b_nytes, gdf_size_type column_size) { this->n_bytes = n_bytes; this->column_size = column_size; } __host__ __device__ gdf_size_type operator()(gdf_size_type x) const { gdf_size_type col_position = x / 8; gdf_size_type length_col = n_bytes != col_position+1 ? 
GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1); //return x % GDF_VALID_BITSIZE; return (length_col - 1) - (x % 8); // x << } }; struct shift_left: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; shift_left(gdf_valid_type num_bits): num_bits(num_bits){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { return x << num_bits; } }; struct shift_right: public thrust::unary_function<gdf_valid_type,gdf_valid_type> { gdf_valid_type num_bits; bool not_too_many; shift_right(gdf_valid_type num_bits, bool not_too_many) : num_bits(num_bits), not_too_many(not_too_many){ } __host__ __device__ gdf_valid_type operator()(gdf_valid_type x) const { //if you want to force the shift to be fill bits with 0 you need to use an unsigned type /*if (not_too_many) { // is the last return x; }*/ return *((unsigned char *) &x) >> num_bits; } }; struct bit_or: public thrust::unary_function<thrust::tuple<gdf_valid_type,gdf_valid_type>,gdf_valid_type> { __host__ __device__ gdf_valid_type operator()(thrust::tuple<gdf_valid_type,gdf_valid_type> x) const { return thrust::get<0>(x) | thrust::get<1>(x); } }; typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator; template<typename stencil_type> struct is_stencil_true { __host__ __device__ bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<2>(value); return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0); } }; struct is_bit_set { __host__ __device__ bool operator()(const thrust::tuple< gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) { gdf_size_type position = thrust::get<1>(value); return ((thrust::get<0>(value) >> position) & 1); } }; struct bit_mask_pack_op : public thrust::unary_function<int64_t,gdf_valid_type> { __host__ __device__ 
gdf_valid_type operator()(const int64_t expanded) { gdf_valid_type result = 0; for(unsigned int i = 0; i < GDF_VALID_BITSIZE; i++){ // 0, 8, 16, ....,48, 56 unsigned char byte = (expanded >> ( (GDF_VALID_BITSIZE - 1 - i ) * 8)); result |= (byte & 1) << i; } return (result); } }; std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)}, {GDF_INT16, sizeof(int16_t)},{GDF_INT32, sizeof(int32_t)}, {GDF_INT64, sizeof(int64_t)}, {GDF_FLOAT32, sizeof(float)}, {GDF_FLOAT64, sizeof(double)} }; //because applying a stencil only needs to know the WIDTH of a type for copying to output, we won't be making a bunch of templated version to store this but rather //storing a map from gdf_type to width //TODO: add a way for the space where we store temp bitmaps for compaction be allocated //on the outside gdf_error gdf_apply_stencil(gdf_column *lhs, gdf_column * stencil, gdf_column * output){ //OK: add a rquire here that output and lhs are the same size GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(lhs->dtype == output->dtype, GDF_DTYPE_MISMATCH); GDF_REQUIRE(!lhs->valid || !lhs->null_count, GDF_VALIDITY_UNSUPPORTED); //find the width in bytes of this data type auto searched_item = column_type_width.find(lhs->dtype); int16_t width = searched_item->second; //width in bytes searched_item = column_type_width.find(stencil->dtype); int16_t stencil_width= searched_item->second; //width in bytes hipStream_t stream; hipStreamCreate(&stream); size_t n_bytes = get_number_of_bytes_for_valid(stencil->size); bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)); gdf_valid_iterator valid_iterator(thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),GDF_VALID_BITSIZE); //TODO: can probably make this happen with some kind of iterator so it can work on any width size //zip the stencil and the valid iterator together typedef 
thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >,gdf_valid_iterator, bit_position_iterator > zipped_stencil_tuple; typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator; //what kind of shit is that you might wonder? //well basically we are zipping up an iterator to the stencil, one to the bit masks, and one which lets us get the bit position based on our index zipped_stencil_iterator zipped_stencil_iter( thrust::make_tuple( thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t * )stencil->data)), valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) )); //NOTE!!!! the output column is getting set to a specific size but we are NOT compacting the allocation, //whoever calls that should handle that if(width == 1){ thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 2){ thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) output->data)); 
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 4){ thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; }else if(width == 8){ thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) lhs->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) output->data)); thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); output->size = output_end - output_start; } gdf_size_type num_values = lhs->size; //TODO:BRING OVER THE BITMASK!!! 
//need to store a prefix sum //align to size 8 rmm::device_vector<gdf_valid_type> valid_bit_mask; //we are expanding the bit mask to an int8 because I can't envision an algorithm that operates on the bitmask that if(num_values % GDF_VALID_BITSIZE != 0){ valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE))); //align this allocation on GDF_VALID_BITSIZE so we don't have to bounds check }else{ valid_bit_mask.resize(num_values); } // doesn't require the use for a prefix sum which will have size 8 * num rows which is much larger than this typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator > mask_tuple; typedef thrust::zip_iterator<mask_tuple> zipped_mask; zipped_mask zipped_mask_iter( thrust::make_tuple( valid_iterator, thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >( thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size)) ) ); typedef thrust::transform_iterator<is_bit_set, zipped_mask > bit_set_iterator; bit_set_iterator bit_set_iter = thrust::make_transform_iterator<is_bit_set,zipped_mask>( zipped_mask_iter, is_bit_set() ); //copy the bitmask to device_vector of int8 thrust::copy(rmm::exec_policy(stream)->on(stream), bit_set_iter, bit_set_iter + num_values, valid_bit_mask.begin()); //remove the values that don't pass the stencil thrust::remove_if(rmm::exec_policy(stream)->on(stream),valid_bit_mask.begin(), valid_bit_mask.begin() + num_values,zipped_stencil_iter, is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >()); //recompact the values and store them in the output bitmask //we can group them into pieces of 8 because we aligned this earlier on when we made the device_vector thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter = thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) valid_bit_mask.data().get())); //you may notice that we 
can write out more bytes than our valid_num_bytes, this only happens when we are not aligned to GDF_VALID_BITSIZE bytes, becasue the //arrow standard requires 64 byte alignment, this is a safe assumption to make thrust::transform(rmm::exec_policy(stream)->on(stream), valid_bit_mask_group_8_iter, valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE), thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),bit_mask_pack_op()); hipStreamSynchronize(stream); hipStreamDestroy(stream); return GDF_SUCCESS; }
424a7abe5d67f8a15b61c9775cb0db92ff5e907e.cu
/* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Felipe Aramburu <felipe@blazingdb.com> * Copyright 2018 Alexander Ocsa <alexander@blazingdb.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <vector> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/remove.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> #include <thrust/iterator/transform_iterator.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.hpp" #include "rmm/thrust_rmm_allocator.h" //std lib #include <map> //wow the freaking example from iterator_adaptpr, what a break right! 
// Adapted from the thrust iterator_adaptor example: an iterator that repeats
// each element of the underlying range n times.
template<typename Iterator>
class repeat_iterator
    : public thrust::iterator_adaptor<repeat_iterator<Iterator>, Iterator> {
public:
    // Shorthand for the iterator_adaptor base we derive from.
    typedef thrust::iterator_adaptor<repeat_iterator<Iterator>, Iterator> super_t;

    __host__ __device__
    repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {}

    // thrust::iterator_core_access needs access to dereference() below.
    friend class thrust::iterator_core_access;

private:
    unsigned int n;        // how many times each element is repeated
    const Iterator begin;  // start of the adapted range (for distance math)

    __host__ __device__
    typename super_t::reference dereference() const {
        return *(begin + (this->base() - begin) / n);
    }
};

typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator;

// Number of gdf_valid_type bytes needed to hold a validity bitmask for
// column_size rows (one bit per row, rounded up to whole bytes).
gdf_size_type get_number_of_bytes_for_valid(gdf_size_type column_size) {
    return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE;
}

// Maps a row index to the bit position inside its validity-mask byte,
// accounting for the (possibly partial) last byte of the mask.
struct modulus_bit_width : public thrust::unary_function<gdf_size_type, gdf_size_type> {
    gdf_size_type n_bytes;      // total bytes in the validity mask
    gdf_size_type column_size;  // number of rows in the column

    // BUGFIX: the parameter was previously misspelled "b_nytes" while the body
    // executed "this->n_bytes = n_bytes;" — a self-assignment that left the
    // member uninitialized. The argument is now actually stored.
    modulus_bit_width(gdf_size_type n_bytes, gdf_size_type column_size)
        : n_bytes(n_bytes), column_size(column_size) {}

    __host__ __device__
    gdf_size_type operator()(gdf_size_type x) const {
        gdf_size_type col_position = x / 8;
        // Bits used in this byte: a full byte unless it is the last one.
        gdf_size_type length_col = n_bytes != col_position + 1
                                       ? GDF_VALID_BITSIZE
                                       : column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
        return (length_col - 1) - (x % 8);
    }
};

// Left-shifts a validity byte by a fixed number of bits.
struct shift_left : public thrust::unary_function<gdf_valid_type, gdf_valid_type> {
    gdf_valid_type num_bits;

    shift_left(gdf_valid_type num_bits) : num_bits(num_bits) {}

    __host__ __device__
    gdf_valid_type operator()(gdf_valid_type x) const {
        return x << num_bits;
    }
};

// Right-shifts a validity byte by a fixed number of bits, filling with zeros.
struct shift_right : public thrust::unary_function<gdf_valid_type, gdf_valid_type> {
    gdf_valid_type num_bits;
    bool not_too_many;  // NOTE(review): currently unused by operator()

    shift_right(gdf_valid_type num_bits, bool not_too_many)
        : num_bits(num_bits), not_too_many(not_too_many) {}

    __host__ __device__
    gdf_valid_type operator()(gdf_valid_type x) const {
        // Shift through an unsigned type so vacated bits are filled with 0.
        return *((unsigned char *)&x) >> num_bits;
    }
};

// Bitwise OR of a pair of validity bytes.
struct bit_or : public thrust::unary_function<thrust::tuple<gdf_valid_type, gdf_valid_type>, gdf_valid_type> {
    __host__ __device__
    gdf_valid_type operator()(thrust::tuple<gdf_valid_type, gdf_valid_type> x) const {
        return thrust::get<0>(x) | thrust::get<1>(x);
    }
};

typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator;

// Predicate: the row passes the stencil iff its validity bit is set AND its
// stencil value is nonzero.
template<typename stencil_type>
struct is_stencil_true {
    __host__ __device__
    bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) {
        gdf_size_type position = thrust::get<2>(value);
        return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0);
    }
};

// Predicate: extracts a single validity bit from a (byte, bit-position) pair.
struct is_bit_set {
    __host__ __device__
    bool operator()(const thrust::tuple<gdf_valid_iterator::value_type, bit_position_iterator::value_type> value) {
        gdf_size_type position = thrust::get<1>(value);
        return ((thrust::get<0>(value) >> position) & 1);
    }
};

// Packs 8 byte-expanded validity flags (one per byte of an int64_t) back into
// a single validity byte.
struct bit_mask_pack_op : public thrust::unary_function<int64_t, gdf_valid_type> {
    __host__ __device__
    gdf_valid_type operator()(const int64_t expanded) {
        gdf_valid_type result = 0;
        for (unsigned int i = 0; i < GDF_VALID_BITSIZE; i++) {
            // Take byte i (counting from the most significant end) ...
            unsigned char byte = (expanded >> ((GDF_VALID_BITSIZE - 1 - i) * 8));
            // ... and fold its low bit into position i of the result.
            result |= (byte & 1) << i;
        }
        return (result);
    }
};

// Width in bytes of each supported fixed-width gdf dtype. Applying a stencil
// only needs the element WIDTH for copying, so a lookup table avoids a pile
// of templated variants.
std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)},
                                                  {GDF_INT16, sizeof(int16_t)},
                                                  {GDF_INT32, sizeof(int32_t)},
                                                  {GDF_INT64, sizeof(int64_t)},
                                                  {GDF_FLOAT32, sizeof(float)},
                                                  {GDF_FLOAT64, sizeof(double)}};

// TODO: add a way for the space where we store temp bitmaps for compaction to
// be allocated on the outside.
//
// Compacts lhs into output, keeping only the rows whose stencil value is
// nonzero and whose stencil validity bit is set. output->size is set to the
// number of surviving rows; the allocation itself is NOT shrunk — the caller
// owns that. The validity bitmask is compacted alongside the data.
gdf_error gdf_apply_stencil(gdf_column *lhs, gdf_column *stencil, gdf_column *output) {
    GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH);
    GDF_REQUIRE(lhs->dtype == output->dtype, GDF_DTYPE_MISMATCH);
    GDF_REQUIRE(!lhs->valid || !lhs->null_count, GDF_VALIDITY_UNSUPPORTED);

    // Find the element width in bytes of this data type.
    // BUGFIX: the find() results used to be dereferenced unchecked; an
    // unsupported dtype would dereference end() (undefined behavior).
    auto searched_item = column_type_width.find(lhs->dtype);
    GDF_REQUIRE(searched_item != column_type_width.end(), GDF_UNSUPPORTED_DTYPE);
    int16_t width = searched_item->second;  // width in bytes

    // Validate the stencil dtype is a supported fixed-width type as well
    // (its width itself is not needed; the stencil is read as int8).
    GDF_REQUIRE(column_type_width.find(stencil->dtype) != column_type_width.end(),
                GDF_UNSUPPORTED_DTYPE);

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    size_t n_bytes = get_number_of_bytes_for_valid(stencil->size);

    bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0),
                                            modulus_bit_width(n_bytes, stencil->size));
    gdf_valid_iterator valid_iterator(
        thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),
        GDF_VALID_BITSIZE);

    // TODO: can probably make this happen with some kind of iterator so it
    // can work on any width size.

    // Zip together: the stencil values (read as int8), the validity bytes,
    // and the bit position of each row within its validity byte.
    typedef thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >, gdf_valid_iterator, bit_position_iterator> zipped_stencil_tuple;
    typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator;

    zipped_stencil_iterator zipped_stencil_iter(thrust::make_tuple(
        thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *)stencil->data)),
        valid_iterator,
        thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
            thrust::make_counting_iterator<gdf_size_type>(0),
            modulus_bit_width(n_bytes, stencil->size))));

    // NOTE!!!! the output column is getting set to a specific size but we are
    // NOT compacting the allocation; whoever calls this should handle that.
    if (width == 1) {
        thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *)lhs->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *)output->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end =
            thrust::copy_if(rmm::exec_policy(stream)->on(stream), input_start,
                            input_start + lhs->size, zipped_stencil_iter, output_start,
                            is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type>());
        output->size = output_end - output_start;
    } else if (width == 2) {
        thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *)lhs->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *)output->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end =
            thrust::copy_if(rmm::exec_policy(stream)->on(stream), input_start,
                            input_start + lhs->size, zipped_stencil_iter, output_start,
                            is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type>());
        output->size = output_end - output_start;
    } else if (width == 4) {
        thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *)lhs->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *)output->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end =
            thrust::copy_if(rmm::exec_policy(stream)->on(stream), input_start,
                            input_start + lhs->size, zipped_stencil_iter, output_start,
                            is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type>());
        output->size = output_end - output_start;
    } else if (width == 8) {
        thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *)lhs->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start =
            thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *)output->data));
        thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end =
            thrust::copy_if(rmm::exec_policy(stream)->on(stream), input_start,
                            input_start + lhs->size, zipped_stencil_iter, output_start,
                            is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type>());
        output->size = output_end - output_start;
    }

    gdf_size_type num_values = lhs->size;

    // TODO: BRING OVER THE BITMASK!!!
    // Expand the bitmask to one int8 per row: no bitmask algorithm here needs
    // a prefix sum, whose 8x-per-row footprint would dwarf this allocation.
    // Pad the allocation up to a multiple of GDF_VALID_BITSIZE so the final
    // 8-bytes-at-a-time repack below never reads out of bounds.
    rmm::device_vector<gdf_valid_type> valid_bit_mask;
    if (num_values % GDF_VALID_BITSIZE != 0) {
        valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE)));
    } else {
        valid_bit_mask.resize(num_values);
    }

    typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator> mask_tuple;
    typedef thrust::zip_iterator<mask_tuple> zipped_mask;

    zipped_mask zipped_mask_iter(thrust::make_tuple(
        valid_iterator,
        thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
            thrust::make_counting_iterator<gdf_size_type>(0),
            modulus_bit_width(n_bytes, stencil->size))));

    typedef thrust::transform_iterator<is_bit_set, zipped_mask> bit_set_iterator;
    bit_set_iterator bit_set_iter =
        thrust::make_transform_iterator<is_bit_set, zipped_mask>(zipped_mask_iter, is_bit_set());

    // Copy the bitmask into the byte-per-row device_vector.
    thrust::copy(rmm::exec_policy(stream)->on(stream), bit_set_iter,
                 bit_set_iter + num_values, valid_bit_mask.begin());

    // Remove the validity flags of rows that don't pass the stencil.
    thrust::remove_if(rmm::exec_policy(stream)->on(stream), valid_bit_mask.begin(),
                      valid_bit_mask.begin() + num_values, zipped_stencil_iter,
                      is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type>());

    // Repack the surviving flags into the output bitmask, 8 at a time; the
    // earlier padding makes it safe to read whole int64_t groups.
    thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter =
        thrust::detail::make_normal_iterator(
            thrust::device_pointer_cast((int64_t *)valid_bit_mask.data().get()));

    // We may write out more bytes than valid_num_bytes; this only happens
    // when not aligned to GDF_VALID_BITSIZE bytes, and because the arrow
    // standard requires 64-byte alignment this is a safe assumption to make.
    thrust::transform(rmm::exec_policy(stream)->on(stream), valid_bit_mask_group_8_iter,
                      valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE),
                      thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),
                      bit_mask_pack_op());

    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);

    return GDF_SUCCESS;
}
df986a253bcd9dd67c40073d7e24db5bb801ce13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include "../defines.h" #include "../gpu.h" //#include "two-phase.h" // ! void data_initialization(ptr_Arrays HostArraysPtr, long int* t, consts def) { *t = 0; for (int i = 0; i < def.locNx; i++) for (int j = 0; j < def.locNy; j++) for (int k = 0; k < def.locNz; k++) if (is_active_point(i, j, k, def)) { // int I = local_to_global(i, 'x', def); int local = i + j * (def.locNx) + k * (def.locNx) * (def.locNy); HostArraysPtr.m[local]=def.porosity[0]; // , (def.source) , // . , if ((j == 0) && (I >= (def.Nx) / 2 - (def.source)) && (I <= (def.Nx) / 2 + (def.source)) && (k >= (def.Nz) / 2 - (def.source)) && (k <= (def.Nz) / 2 + (def.source))) { HostArraysPtr.S_n[local] = def.S_n_gr; } else { HostArraysPtr.S_n[local] = 0; } if (j == 0) { HostArraysPtr.P_w[local] = def.P_atm; } else { HostArraysPtr.P_w[local] = HostArraysPtr.P_w[local - (def.locNx)] + ro_eff_gdy(HostArraysPtr, local - (def.locNx), def); } HostArraysPtr.ro_w[local] = def.ro0_w * (1. + (def.beta_w) * (HostArraysPtr.P_w[local] - def.P_atm)); ///!!!! ! P_n HostArraysPtr.ro_n[local] = def.ro0_n * (1. 
+ (def.beta_n) * (HostArraysPtr.P_w[local] - def.P_atm)); /* if ((HostArraysPtr.x[local]>=(def.NX)/2.*(def.h1)) && (HostArraysPtr.x[local]<=4.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]<=2./5.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]>=(-1.)*HostArraysPtr.x[local]/4.+2./5.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; if ((HostArraysPtr.x[local]>=(def.NX)/5.*(def.h1)) && (HostArraysPtr.x[local]<=2.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]<=4./5.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]>=3./5.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; */ /* if ((HostArraysPtr.x[local]>=2.*(def.NX)/5.*(def.h1)) && (HostArraysPtr.x[local]<=3.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]>=1./10.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]<=3./10.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; */ test_nan(HostArraysPtr.S_n[local], __FILE__, __LINE__); test_nan(HostArraysPtr.P_w[local], __FILE__, __LINE__); test_nan(HostArraysPtr.m[local], __FILE__, __LINE__); } } // , NAPL P2 Xi ( ) __global__ void assign_P_Xi_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx)) && (j < (gpu_def->locNy)) && (k < (gpu_def->locNz)) && (device_is_active_point(i, j, k) == 1)) { int media = 0; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); double S_n = DevArraysPtr.S_n[local]; double P_w = DevArraysPtr.P_w[local]; double S_e = (1. - S_n - gpu_def->S_wr[media]) / (1. - gpu_def->S_wr[media]); double k_w = pow(S_e, (2. + 3. * gpu_def->lambda[media]) / gpu_def->lambda[media]); double k_n = (1. - S_e) * (1. - S_e) * (1 - pow(S_e, (2. + gpu_def->lambda[media]) / gpu_def->lambda[media])); double P_k = gpu_def->P_d[media] * pow((1. - S_n - gpu_def->S_wr[media]) / (1. - gpu_def->S_wr[media]), -1. 
/ gpu_def->lambda[media]); DevArraysPtr.P_n[local] = P_w + P_k; DevArraysPtr.Xi_w[local] = -1 * gpu_def->K[media] * k_w / gpu_def->mu_w; DevArraysPtr.Xi_n[local] = -1 * gpu_def->K[media] * k_n / gpu_def->mu_n; device_test_positive(DevArraysPtr.P_n[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_w[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_n[local], __FILE__, __LINE__); } } // ( ) __global__ void Newton_method_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx) - 1) && (j < gpu_def->locNy - 1) && (k < (gpu_def->locNz)) && (i != 0) && (j != 0) && (((k != 0) && (k != (gpu_def->locNz) - 1)) || ((gpu_def->locNz) < 2))) { int media = 0; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); double S_e, S_n, P_w, P_k, AAA, F1, F2, PkS, F1P, F2P, F1S, F2S, det; for (int w = 1; w <= gpu_def->newton_iterations; w++) { S_n = DevArraysPtr.S_n[local]; P_w = DevArraysPtr.P_w[local]; S_e = (1 - S_n - gpu_def->S_wr[media]) / (1 - gpu_def->S_wr[media]); P_k = gpu_def->P_d[media] * pow(S_e, -1 / gpu_def->lambda[media]); AAA = pow(S_e, ((-1 / gpu_def->lambda[media]) - 1)); F1 = gpu_def->ro0_w * (1 + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)) * (1 - S_n) - DevArraysPtr.roS_w[local]; F2 = gpu_def->ro0_n * (1 + (gpu_def->beta_n) * (P_w + P_k - gpu_def->P_atm)) * S_n - DevArraysPtr.roS_n[local]; PkS = AAA * gpu_def->P_d[media] / (gpu_def->lambda[media] * (1 - gpu_def->S_wr[media])); F1P = gpu_def->ro0_w * (gpu_def->beta_w) * (1 - S_n); F2P = gpu_def->ro0_n * (gpu_def->beta_n) * S_n; F1S = (-1) * gpu_def->ro0_w * (1 + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)); F2S = gpu_def->ro0_n * (1 + (gpu_def->beta_n) * (P_w + P_k - gpu_def->P_atm + (S_n * PkS))); det = F1P * F2S - F1S * F2P; DevArraysPtr.P_w[local] = P_w - (1 / det) * (F2S * F1 - F1S * F2); DevArraysPtr.S_n[local] = S_n - (1 / 
det) * (F1P * F2 - F2P * F1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } // __global__ void Border_S_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz)) if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 0) || ((gpu_def->source) <= 0)) { DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; } if ((j == 0) && ((gpu_def->source) > 0)) { int I = device_local_to_global(i, 'x'); if ((I >= (gpu_def->Nx) / 2 - (gpu_def->source)) && (I <= (gpu_def->Nx) / 2 + (gpu_def->source)) && (k >= (gpu_def->Nz) / 2 - (gpu_def->source)) && (k <= (gpu_def->Nz) / 2 + (gpu_def->source))) { DevArraysPtr.S_n[local] = gpu_def->S_n_gr; } else //DevArraysPtr.S_n[local] = 0; { DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; } } device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } __global__ void Border_P_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz)) if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 
0) && (j != (gpu_def->locNy) - 1)) { DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1]; } else if (j == 0) { DevArraysPtr.P_w[local] = gpu_def->P_atm; } else { DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1] + device_ro_eff_gdy(DevArraysPtr, local1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); } } // __device__ int device_is_injection_well(int i, int j, int k) { return 0; } // __device__ int device_is_output_well(int i, int j, int k) { return 0; } // / q_i __device__ void device_wells_q(ptr_Arrays DevArraysPtr, int i, int j, int k, double* q_w, double* q_n, double* q_g) { *q_w = 0.0; *q_g = 0.0; *q_n = 0.0; }
df986a253bcd9dd67c40073d7e24db5bb801ce13.cu
//#include "../defines.h" #include "../gpu.h" //#include "two-phase.h" // Заглушка! Убрать как функция будет перенесена void data_initialization(ptr_Arrays HostArraysPtr, long int* t, consts def) { *t = 0; for (int i = 0; i < def.locNx; i++) for (int j = 0; j < def.locNy; j++) for (int k = 0; k < def.locNz; k++) if (is_active_point(i, j, k, def)) { // Преобразование локальных координат процессора к глобальным int I = local_to_global(i, 'x', def); int local = i + j * (def.locNx) + k * (def.locNx) * (def.locNy); HostArraysPtr.m[local]=def.porosity[0]; // Если точка на верхней границе, не далее (def.source) точек от центра, // то в ней начальная насыщенность. Иначе, нулевая if ((j == 0) && (I >= (def.Nx) / 2 - (def.source)) && (I <= (def.Nx) / 2 + (def.source)) && (k >= (def.Nz) / 2 - (def.source)) && (k <= (def.Nz) / 2 + (def.source))) { HostArraysPtr.S_n[local] = def.S_n_gr; } else { HostArraysPtr.S_n[local] = 0; } if (j == 0) { HostArraysPtr.P_w[local] = def.P_atm; } else { HostArraysPtr.P_w[local] = HostArraysPtr.P_w[local - (def.locNx)] + ro_eff_gdy(HostArraysPtr, local - (def.locNx), def); } HostArraysPtr.ro_w[local] = def.ro0_w * (1. + (def.beta_w) * (HostArraysPtr.P_w[local] - def.P_atm)); ///!!!! Не учитываются капиллярные силы! Или надо считать перед этим шагом P_n HostArraysPtr.ro_n[local] = def.ro0_n * (1. 
+ (def.beta_n) * (HostArraysPtr.P_w[local] - def.P_atm)); /* if ((HostArraysPtr.x[local]>=(def.NX)/2.*(def.h1)) && (HostArraysPtr.x[local]<=4.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]<=2./5.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]>=(-1.)*HostArraysPtr.x[local]/4.+2./5.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; if ((HostArraysPtr.x[local]>=(def.NX)/5.*(def.h1)) && (HostArraysPtr.x[local]<=2.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]<=4./5.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]>=3./5.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; */ /* if ((HostArraysPtr.x[local]>=2.*(def.NX)/5.*(def.h1)) && (HostArraysPtr.x[local]<=3.*(def.NX)/5.*(def.h1))) if ((HostArraysPtr.y[local]>=1./10.*def.locNy*(def.h2)) && (HostArraysPtr.y[local]<=3./10.*def.locNy*(def.h2))) HostArraysPtr.media[local]=1; */ test_nan(HostArraysPtr.S_n[local], __FILE__, __LINE__); test_nan(HostArraysPtr.P_w[local], __FILE__, __LINE__); test_nan(HostArraysPtr.m[local], __FILE__, __LINE__); } } // Расчет плотностей, давления NAPL P2 и Xi в каждой точке сетки (независимо от остальных точек) __global__ void assign_P_Xi_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx)) && (j < (gpu_def->locNy)) && (k < (gpu_def->locNz)) && (device_is_active_point(i, j, k) == 1)) { int media = 0; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); double S_n = DevArraysPtr.S_n[local]; double P_w = DevArraysPtr.P_w[local]; double S_e = (1. - S_n - gpu_def->S_wr[media]) / (1. - gpu_def->S_wr[media]); double k_w = pow(S_e, (2. + 3. * gpu_def->lambda[media]) / gpu_def->lambda[media]); double k_n = (1. - S_e) * (1. - S_e) * (1 - pow(S_e, (2. + gpu_def->lambda[media]) / gpu_def->lambda[media])); double P_k = gpu_def->P_d[media] * pow((1. - S_n - gpu_def->S_wr[media]) / (1. - gpu_def->S_wr[media]), -1. 
/ gpu_def->lambda[media]); DevArraysPtr.P_n[local] = P_w + P_k; DevArraysPtr.Xi_w[local] = -1 * gpu_def->K[media] * k_w / gpu_def->mu_w; DevArraysPtr.Xi_n[local] = -1 * gpu_def->K[media] * k_n / gpu_def->mu_n; device_test_positive(DevArraysPtr.P_n[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_w[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_n[local], __FILE__, __LINE__); } } // Метод Ньютона для каждой точки сетки (независимо от остальных точек) __global__ void Newton_method_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx) - 1) && (j < gpu_def->locNy - 1) && (k < (gpu_def->locNz)) && (i != 0) && (j != 0) && (((k != 0) && (k != (gpu_def->locNz) - 1)) || ((gpu_def->locNz) < 2))) { int media = 0; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); double S_e, S_n, P_w, P_k, AAA, F1, F2, PkS, F1P, F2P, F1S, F2S, det; for (int w = 1; w <= gpu_def->newton_iterations; w++) { S_n = DevArraysPtr.S_n[local]; P_w = DevArraysPtr.P_w[local]; S_e = (1 - S_n - gpu_def->S_wr[media]) / (1 - gpu_def->S_wr[media]); P_k = gpu_def->P_d[media] * pow(S_e, -1 / gpu_def->lambda[media]); AAA = pow(S_e, ((-1 / gpu_def->lambda[media]) - 1)); F1 = gpu_def->ro0_w * (1 + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)) * (1 - S_n) - DevArraysPtr.roS_w[local]; F2 = gpu_def->ro0_n * (1 + (gpu_def->beta_n) * (P_w + P_k - gpu_def->P_atm)) * S_n - DevArraysPtr.roS_n[local]; PkS = AAA * gpu_def->P_d[media] / (gpu_def->lambda[media] * (1 - gpu_def->S_wr[media])); F1P = gpu_def->ro0_w * (gpu_def->beta_w) * (1 - S_n); F2P = gpu_def->ro0_n * (gpu_def->beta_n) * S_n; F1S = (-1) * gpu_def->ro0_w * (1 + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)); F2S = gpu_def->ro0_n * (1 + (gpu_def->beta_n) * (P_w + P_k - gpu_def->P_atm + (S_n * PkS))); det = F1P * F2S - F1S * F2P; DevArraysPtr.P_w[local] = P_w - (1 / det) 
* (F2S * F1 - F1S * F2); DevArraysPtr.S_n[local] = S_n - (1 / det) * (F1P * F2 - F2P * F1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } // Задание граничных условий __global__ void Border_S_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz)) if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 0) || ((gpu_def->source) <= 0)) { DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; } if ((j == 0) && ((gpu_def->source) > 0)) { int I = device_local_to_global(i, 'x'); if ((I >= (gpu_def->Nx) / 2 - (gpu_def->source)) && (I <= (gpu_def->Nx) / 2 + (gpu_def->source)) && (k >= (gpu_def->Nz) / 2 - (gpu_def->source)) && (k <= (gpu_def->Nz) / 2 + (gpu_def->source))) { DevArraysPtr.S_n[local] = gpu_def->S_n_gr; } else //DevArraysPtr.S_n[local] = 0; { DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; } } device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } __global__ void Border_P_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz)) if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); 
int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 0) && (j != (gpu_def->locNy) - 1)) { DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1]; } else if (j == 0) { DevArraysPtr.P_w[local] = gpu_def->P_atm; } else { DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1] + device_ro_eff_gdy(DevArraysPtr, local1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); } } // Является ли точка нагнетательной скважиной __device__ int device_is_injection_well(int i, int j, int k) { return 0; } // Является ли точка добывающей скважиной __device__ int device_is_output_well(int i, int j, int k) { return 0; } // Устанавливает значения втекаемых/вытекаемых жидкостей q_i на скважинах __device__ void device_wells_q(ptr_Arrays DevArraysPtr, int i, int j, int k, double* q_w, double* q_n, double* q_g) { *q_w = 0.0; *q_g = 0.0; *q_n = 0.0; }
8311be16674950aea08babf95bb5d513ad3226f3.hip
// !!! This is a file automatically generated by hipify!!! /* * Please write your name and net ID below * * Last name: Bora * First name: Anuj * Net ID: aab688 * */ /* * This file contains the code for doing the heat distribution problem. * You do not need to modify anything except starting gpu_heat_dist() at the bottom * of this file. * In gpu_heat_dist() you can organize your data structure and the call to your * kernel(s) that you need to write too. * * You compile with: * nvcc -o heatdist heatdist.cu */ #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <time.h> /* To index element (i,j) of a 2D array stored as 1D */ #define index(i, j, N) ((i)*(N)) + (j) #define BLOCKSIZE 256 /*****************************************************************/ // Function declarations: Feel free to add any functions you want. int gpu_levenshtein( char * , char * , int , int ); void printMatrix(int*, unsigned int); int getBlocks(int , int ); /*****************************************************************/ /*****************************************************************/ __global__ void warmUpGPU() { // do nothing } __global__ void antiparallelUT(unsigned short* d_A, int step, int len1, int len2, const char* d_word1, const char* d_word2, int* d_result) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = step-i; if (i == 0 && j <= len2) { d_A[index(i + j, j, len2 + 1)] = j; } if (j == 0 && i <= len2) { d_A[index(i + j, j, len2 + 1)] = i; } if (i <= len1 && j <= len2 && i>=1 && j>=1) { //printf("[%d][%d]\n", i, j); //d_A[i*N+j] = 1; int delete_count; int insert; int substitute; char c2; char c1; c1 = d_word1[i-1]; c2 = d_word2[j-1]; int score = 1; if (c1 == c2) { score = 0; } /* delete_count = d_A[index(i - 1, j, len2 + 1)] + 1; insert = d_A[index(i, j - 1, len2 + 1)] + 1; substitute = d_A[index(i - 1, j - 1, len2 + 1)] + score; */ int old_x = i - 1; int y = j; int x = old_x + y; if (x > len1 ) { x = x - len1 - 1; } delete_count = d_A[index(x, y, 
len2 + 1)] + 1; old_x = i - 1; y = j - 1; x = old_x + y; if (x > len1 ) { x = x - len1 - 1; } insert = d_A[index(x, y, len2 + 1)] + score; old_x = i; y = j - 1; x = old_x + y; if (x > len1 ) { x = x - len1 - 1; } substitute = d_A[index(x, y, len2 + 1)] + 1; int min; if (delete_count < insert) { min = delete_count; } else { min = insert; } if (substitute < min) { min = substitute; } x = i + j; y = j; if (x > len1 ) { x = x - len1 - 1; } d_A[index(x, y, len2 + 1)] = min; if (i == len1 && j == len2) { //printf("updating result..."); //printf("\nmin = %d\n", min); d_result[0] = min; } } //__syncthreads(); } // Print void printMatrix(int* playground, int len1, int len2) { for (int i = 0; i < len1; i++) { for (int j = 0; j < len2; j++) { printf("%d ", playground[index(i,j,len2)]); } printf("\n "); } } int getBlocks(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /*****************************************************************/ int main(int argc, char * argv[]) { //char string1[4] = {'a', 'n', 'u', 'j'}; //char string2[5] = {'a', 'n', 't', 'j', 'b'}; int size =32000; char string1[size]; char string2[size]; for (int i = 0; i < size; i++) { string1[i] = 'a'; string2[i] = 'b'; } //string1[0] = 'b'; int dis = gpu_levenshtein(string1, string2, size, size); printf("Result = %d", dis); return 0; } /***************** The GPU version: Write your code here *********************/ int gpu_levenshtein( char * word1, char * word2, int len1, int len2) { hipLaunchKernelGGL(( warmUpGPU), dim3(1), dim3(1), 0, 0, ); int N = len1 + 1; int size = (N) * (N) * sizeof(unsigned short); int word1_size = len1 * sizeof(char); int word2_size = len2 * sizeof(char); int* result_host = (int*)malloc(sizeof(int)); result_host[0] = 0; unsigned short *zero_matrix; unsigned short *d_A; int *d_result; char *d_word1, *d_word2; zero_matrix = (unsigned short *)calloc((N) * (N), sizeof(unsigned short)); if(zero_matrix == NULL) { printf("Memory allocation failed on CPU"); } // int i, j; /* for(i = 
0; i <= len1; i++) { for (j = 0; j <= len2; j++) { zero_matrix[index(i, j, (len2 + 1))] = 0; } } */ /* for (i = 0; i < (2 * N)-1; i++) { for (j = 0; j < N; j++) { zero_matrix[index(i, 0, (len2 + 1))] = 0; if (i == 0) { zero_matrix[index(0, j, (len2 + 1))] = j; } if (j == 0) { zero_matrix[index(i, 0, (len2 + 1))] = i; } } }*/ //printMatrix(zero_matrix, ((2 * N)-1), N); /* for (i = 0; i <= len1; i++) { zero_matrix[index(i, 0, (len2 + 1))] = i; } for (i = 0; i <= len2; i++) { zero_matrix[index(0, i, (len2 + 1))] = i; } */ if ( hipSuccess != hipMalloc((void **) &d_A, size) ) { printf( "Error in allocating memory on GPU!!\n" ); } hipMalloc((void **) &d_result, sizeof(int)); hipMalloc((void **) &d_word1, word1_size); hipMalloc((void **) &d_word2, word2_size); hipMemcpy(d_A, zero_matrix, size, hipMemcpyHostToDevice); hipMemcpy(d_result, result_host, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_word1, word1, word1_size, hipMemcpyHostToDevice); hipMemcpy(d_word2, word2, word2_size, hipMemcpyHostToDevice); int step = 0; for ( step = 0; step < (2 * N) - 1; step++) { dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(getBlocks(step, BLOCKSIZE)); if (step == 0) { hipLaunchKernelGGL(( antiparallelUT), dim3(1),dim3(dimBlock.x), 0, 0, d_A,step,len1, len2, d_word1, d_word2, d_result); } else { hipLaunchKernelGGL(( antiparallelUT), dim3(dimGrid.x),dim3(dimBlock.x), 0, 0, d_A,step,len1, len2, d_word1, d_word2, d_result); } } // Step 3 : Bring result back to host //hipMemcpy(zero_matrix, d_A, size, hipMemcpyDeviceToHost); //int *result_host = 0; hipMemcpy(result_host, d_result, sizeof(int), hipMemcpyDeviceToHost); // Step 4 : Free device memory hipFree(d_A); hipFree(d_word1); hipFree(d_word2); //printf("\n\n\n"); //printMatrix(zero_matrix, N, N); //printMatrix(flags, len1, len2); //printf("%d \n", index(len1, len2, len2 + 1)); //int result = zero_matrix[index(len1, len2, (len2 + 1))]; free(zero_matrix); return result_host[0]; }
8311be16674950aea08babf95bb5d513ad3226f3.cu
/* * Please write your name and net ID below * * Last name: Bora * First name: Anuj * Net ID: aab688 * */ /* * This file contains the code for doing the heat distribution problem. * You do not need to modify anything except starting gpu_heat_dist() at the bottom * of this file. * In gpu_heat_dist() you can organize your data structure and the call to your * kernel(s) that you need to write too. * * You compile with: * nvcc -o heatdist heatdist.cu */ #include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <time.h> /* To index element (i,j) of a 2D array stored as 1D */ #define index(i, j, N) ((i)*(N)) + (j) #define BLOCKSIZE 256 /*****************************************************************/ // Function declarations: Feel free to add any functions you want. int gpu_levenshtein( char * , char * , int , int ); void printMatrix(int*, unsigned int); int getBlocks(int , int ); /*****************************************************************/ /*****************************************************************/ __global__ void warmUpGPU() { // do nothing } __global__ void antiparallelUT(unsigned short* d_A, int step, int len1, int len2, const char* d_word1, const char* d_word2, int* d_result) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = step-i; if (i == 0 && j <= len2) { d_A[index(i + j, j, len2 + 1)] = j; } if (j == 0 && i <= len2) { d_A[index(i + j, j, len2 + 1)] = i; } if (i <= len1 && j <= len2 && i>=1 && j>=1) { //printf("[%d][%d]\n", i, j); //d_A[i*N+j] = 1; int delete_count; int insert; int substitute; char c2; char c1; c1 = d_word1[i-1]; c2 = d_word2[j-1]; int score = 1; if (c1 == c2) { score = 0; } /* delete_count = d_A[index(i - 1, j, len2 + 1)] + 1; insert = d_A[index(i, j - 1, len2 + 1)] + 1; substitute = d_A[index(i - 1, j - 1, len2 + 1)] + score; */ int old_x = i - 1; int y = j; int x = old_x + y; if (x > len1 ) { x = x - len1 - 1; } delete_count = d_A[index(x, y, len2 + 1)] + 1; old_x = i - 1; y = j - 1; x = old_x + y; if (x > len1 
) { x = x - len1 - 1; } insert = d_A[index(x, y, len2 + 1)] + score; old_x = i; y = j - 1; x = old_x + y; if (x > len1 ) { x = x - len1 - 1; } substitute = d_A[index(x, y, len2 + 1)] + 1; int min; if (delete_count < insert) { min = delete_count; } else { min = insert; } if (substitute < min) { min = substitute; } x = i + j; y = j; if (x > len1 ) { x = x - len1 - 1; } d_A[index(x, y, len2 + 1)] = min; if (i == len1 && j == len2) { //printf("updating result..."); //printf("\nmin = %d\n", min); d_result[0] = min; } } //__syncthreads(); } // Print void printMatrix(int* playground, int len1, int len2) { for (int i = 0; i < len1; i++) { for (int j = 0; j < len2; j++) { printf("%d ", playground[index(i,j,len2)]); } printf("\n "); } } int getBlocks(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /*****************************************************************/ int main(int argc, char * argv[]) { //char string1[4] = {'a', 'n', 'u', 'j'}; //char string2[5] = {'a', 'n', 't', 'j', 'b'}; int size =32000; char string1[size]; char string2[size]; for (int i = 0; i < size; i++) { string1[i] = 'a'; string2[i] = 'b'; } //string1[0] = 'b'; int dis = gpu_levenshtein(string1, string2, size, size); printf("Result = %d", dis); return 0; } /***************** The GPU version: Write your code here *********************/ int gpu_levenshtein( char * word1, char * word2, int len1, int len2) { warmUpGPU<<<1, 1>>>(); int N = len1 + 1; int size = (N) * (N) * sizeof(unsigned short); int word1_size = len1 * sizeof(char); int word2_size = len2 * sizeof(char); int* result_host = (int*)malloc(sizeof(int)); result_host[0] = 0; unsigned short *zero_matrix; unsigned short *d_A; int *d_result; char *d_word1, *d_word2; zero_matrix = (unsigned short *)calloc((N) * (N), sizeof(unsigned short)); if(zero_matrix == NULL) { printf("Memory allocation failed on CPU"); } // int i, j; /* for(i = 0; i <= len1; i++) { for (j = 0; j <= len2; j++) { zero_matrix[index(i, j, (len2 + 1))] = 0; } } */ /* for 
(i = 0; i < (2 * N)-1; i++) { for (j = 0; j < N; j++) { zero_matrix[index(i, 0, (len2 + 1))] = 0; if (i == 0) { zero_matrix[index(0, j, (len2 + 1))] = j; } if (j == 0) { zero_matrix[index(i, 0, (len2 + 1))] = i; } } }*/ //printMatrix(zero_matrix, ((2 * N)-1), N); /* for (i = 0; i <= len1; i++) { zero_matrix[index(i, 0, (len2 + 1))] = i; } for (i = 0; i <= len2; i++) { zero_matrix[index(0, i, (len2 + 1))] = i; } */ if ( cudaSuccess != cudaMalloc((void **) &d_A, size) ) { printf( "Error in allocating memory on GPU!!\n" ); } cudaMalloc((void **) &d_result, sizeof(int)); cudaMalloc((void **) &d_word1, word1_size); cudaMalloc((void **) &d_word2, word2_size); cudaMemcpy(d_A, zero_matrix, size, cudaMemcpyHostToDevice); cudaMemcpy(d_result, result_host, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_word1, word1, word1_size, cudaMemcpyHostToDevice); cudaMemcpy(d_word2, word2, word2_size, cudaMemcpyHostToDevice); int step = 0; for ( step = 0; step < (2 * N) - 1; step++) { dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(getBlocks(step, BLOCKSIZE)); if (step == 0) { antiparallelUT<<<1,dimBlock.x>>>(d_A,step,len1, len2, d_word1, d_word2, d_result); } else { antiparallelUT<<<dimGrid.x,dimBlock.x>>>(d_A,step,len1, len2, d_word1, d_word2, d_result); } } // Step 3 : Bring result back to host //cudaMemcpy(zero_matrix, d_A, size, cudaMemcpyDeviceToHost); //int *result_host = 0; cudaMemcpy(result_host, d_result, sizeof(int), cudaMemcpyDeviceToHost); // Step 4 : Free device memory cudaFree(d_A); cudaFree(d_word1); cudaFree(d_word2); //printf("\n\n\n"); //printMatrix(zero_matrix, N, N); //printMatrix(flags, len1, len2); //printf("%d \n", index(len1, len2, len2 + 1)); //int result = zero_matrix[index(len1, len2, (len2 + 1))]; free(zero_matrix); return result_host[0]; }
73a2c94a34d7b0d77db0208dce627e3092843b13.hip
// !!! This is a file automatically generated by hipify!!! #include "BvhPrimitiveNode.h" #include <cassert> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include "system\CudaDevice\CudaKernelLauncher.cu" namespace mn { BvhPrimitiveArray::BvhPrimitiveArray() {} BvhPrimitiveArray::~BvhPrimitiveArray() {} void BvhPrimitiveArray::setup(uint primSize) { _bvArray.setup(primSize); _primSize = primSize; /// build attribs checkCudaErrors(hipMalloc((void**)&_attribs[MTCODE], sizeof(MCSize)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[IDX], sizeof(int)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[VIDA], sizeof(int)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[VIDB], sizeof(int)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[VIDC], sizeof(int)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[TYPE], sizeof(uint)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[EXT_MARK], sizeof(uint)*primSize)); checkCudaErrors(hipMalloc((void**)&_attribs[EXT_NO], sizeof(int)*primSize)); /// build ports portptr(COMPLETE) = new BvhPrimitiveCompletePort(_bvArray.portobj<0>()); /// link ports port<COMPLETE>()->link(_attribs, MTCODE); } void BvhPrimitiveArray::cleanup() { /// clean attribs for (int i = 0; i < NUM_ATTRIBS; i++) checkCudaErrors(hipFree(_attribs[i])); /// clean ports delete port<COMPLETE>(); /// _bvArray.cleanup(); } void BvhPrimitiveArray::gather(int size, const int* gatherPos, BvhPrimitiveArray& to) { _bvArray.gather(size, gatherPos, to._bvArray); recordLaunch("GatherPrims", (size + 255) / 256, 256, 0, gatherPrims, size, gatherPos, portobj<0>(), to.portobj<0>()); } void BvhPrimitiveArray::scatter(int size, const int* scatterPos, BvhPrimitiveArray& to) { _bvArray.scatter(size, scatterPos, to._bvArray); recordLaunch("ScatterPrims", (size + 255) / 256, 256, 0, scatterPrims, size, scatterPos, portobj<0>(), to.portobj<0>()); } void*& BvhPrimitiveArray::portptr(EnumBvhPrimPorts no) { assert(no >= COMPLETE && no < NUM_PORTS); 
return _ports[no]; } __global__ void gatherPrims(int size, const int* gatherPos, BvhPrimitiveCompletePort from, BvhPrimitiveCompletePort to) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int ori = gatherPos[idx]; to.mtcode(idx) = from.mtcode(ori); to.idx(idx) = from.idx(ori); to.vida(idx) = from.vida(ori); to.vidb(idx) = from.vidb(ori); to.vidc(idx) = from.vidc(ori); to.type(idx) = from.type(ori); } __global__ void scatterPrims(int size, const int* scatterPos, BvhPrimitiveCompletePort from, BvhPrimitiveCompletePort to) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int tar = scatterPos[idx]; to.mtcode(tar) = from.mtcode(idx); to.idx(tar) = from.idx(idx); to.vida(tar) = from.vida(idx); to.vidb(tar) = from.vidb(idx); to.vidc(tar) = from.vidc(idx); to.type(tar) = from.type(idx); } }
73a2c94a34d7b0d77db0208dce627e3092843b13.cu
#include "BvhPrimitiveNode.h" #include <cassert> #include <cuda_runtime.h> #include <helper_cuda.h> #include "system\CudaDevice\CudaKernelLauncher.cu" namespace mn { BvhPrimitiveArray::BvhPrimitiveArray() {} BvhPrimitiveArray::~BvhPrimitiveArray() {} void BvhPrimitiveArray::setup(uint primSize) { _bvArray.setup(primSize); _primSize = primSize; /// build attribs checkCudaErrors(cudaMalloc((void**)&_attribs[MTCODE], sizeof(MCSize)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[IDX], sizeof(int)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[VIDA], sizeof(int)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[VIDB], sizeof(int)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[VIDC], sizeof(int)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[TYPE], sizeof(uint)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[EXT_MARK], sizeof(uint)*primSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[EXT_NO], sizeof(int)*primSize)); /// build ports portptr(COMPLETE) = new BvhPrimitiveCompletePort(_bvArray.portobj<0>()); /// link ports port<COMPLETE>()->link(_attribs, MTCODE); } void BvhPrimitiveArray::cleanup() { /// clean attribs for (int i = 0; i < NUM_ATTRIBS; i++) checkCudaErrors(cudaFree(_attribs[i])); /// clean ports delete port<COMPLETE>(); /// _bvArray.cleanup(); } void BvhPrimitiveArray::gather(int size, const int* gatherPos, BvhPrimitiveArray& to) { _bvArray.gather(size, gatherPos, to._bvArray); recordLaunch("GatherPrims", (size + 255) / 256, 256, 0, gatherPrims, size, gatherPos, portobj<0>(), to.portobj<0>()); } void BvhPrimitiveArray::scatter(int size, const int* scatterPos, BvhPrimitiveArray& to) { _bvArray.scatter(size, scatterPos, to._bvArray); recordLaunch("ScatterPrims", (size + 255) / 256, 256, 0, scatterPrims, size, scatterPos, portobj<0>(), to.portobj<0>()); } void*& BvhPrimitiveArray::portptr(EnumBvhPrimPorts no) { assert(no >= COMPLETE && no < NUM_PORTS); return _ports[no]; } __global__ void gatherPrims(int 
size, const int* gatherPos, BvhPrimitiveCompletePort from, BvhPrimitiveCompletePort to) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int ori = gatherPos[idx]; to.mtcode(idx) = from.mtcode(ori); to.idx(idx) = from.idx(ori); to.vida(idx) = from.vida(ori); to.vidb(idx) = from.vidb(ori); to.vidc(idx) = from.vidc(ori); to.type(idx) = from.type(ori); } __global__ void scatterPrims(int size, const int* scatterPos, BvhPrimitiveCompletePort from, BvhPrimitiveCompletePort to) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int tar = scatterPos[idx]; to.mtcode(tar) = from.mtcode(idx); to.idx(tar) = from.idx(idx); to.vida(tar) = from.vida(idx); to.vidb(tar) = from.vidb(idx); to.vidc(tar) = from.vidc(idx); to.type(tar) = from.type(idx); } }
bb4deeee80dfe0bfa6cc9c009b9e88b91667a38b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" #include <thrust/scan.h> __global__ void expKernel(numeric_t* out, numeric_t* in, size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < N) { out[x] = exp(in[x]); } } __global__ void intKernel(numeric_t* out, numeric_t* in, numeric_t delta, size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < N) { out[x] = in[x] * delta; if (x % 1 == 0) { //printf("intKernel %d f(%f) = %f\n", x, f[x], out[x]); } } } __global__ void sumKernel(numeric_t* out, numeric_t* f, numeric_t* blockSum, size_t N) { int tid = threadIdx.x; int off = blockIdx.x * blockDim.x; int i = blockIdx.x * blockDim.x + threadIdx.x; numeric_t sum = 0.0; for (int j = off; j < off + tid; j++) { sum += f[j]; } out[i] == sum; if (threadIdx.x == blockDim.x - 1) { blockSum[blockIdx.x] = sum; } } __global__ void sumBlocksKernel(numeric_t* sums, numeric_t* blockSum, size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; numeric_t sum = 0; for (int j = 0; j < blockIdx.x; j++) { sum += blockSum[j]; } sums[i] = sums[i] + sum; } __global__ void mulVec(numeric_t* out, numeric_t* in, numeric_t scalar, size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { out[i] = exp(sin(sqrt(in[i]))); } }
bb4deeee80dfe0bfa6cc9c009b9e88b91667a38b.cu
#include "kernel.cuh" #include <thrust/scan.h> __global__ void expKernel(numeric_t* out, numeric_t* in, size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < N) { out[x] = exp(in[x]); } } __global__ void intKernel(numeric_t* out, numeric_t* in, numeric_t delta, size_t N) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < N) { out[x] = in[x] * delta; if (x % 1 == 0) { //printf("intKernel %d f(%f) = %f\n", x, f[x], out[x]); } } } __global__ void sumKernel(numeric_t* out, numeric_t* f, numeric_t* blockSum, size_t N) { int tid = threadIdx.x; int off = blockIdx.x * blockDim.x; int i = blockIdx.x * blockDim.x + threadIdx.x; numeric_t sum = 0.0; for (int j = off; j < off + tid; j++) { sum += f[j]; } out[i] == sum; if (threadIdx.x == blockDim.x - 1) { blockSum[blockIdx.x] = sum; } } __global__ void sumBlocksKernel(numeric_t* sums, numeric_t* blockSum, size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; numeric_t sum = 0; for (int j = 0; j < blockIdx.x; j++) { sum += blockSum[j]; } sums[i] = sums[i] + sum; } __global__ void mulVec(numeric_t* out, numeric_t* in, numeric_t scalar, size_t N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { out[i] = exp(sin(sqrt(in[i]))); } }
6dba64f22cc853daf4af2fc763ed91a3d8ffc03c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x3; int xdim0_advec_mom_kernel_x3_h = -1; __constant__ int ydim0_advec_mom_kernel_x3; int ydim0_advec_mom_kernel_x3_h = -1; __constant__ int xdim1_advec_mom_kernel_x3; int xdim1_advec_mom_kernel_x3_h = -1; __constant__ int ydim1_advec_mom_kernel_x3; int ydim1_advec_mom_kernel_x3_h = -1; __constant__ int xdim2_advec_mom_kernel_x3; int xdim2_advec_mom_kernel_x3_h = -1; __constant__ int ydim2_advec_mom_kernel_x3; int ydim2_advec_mom_kernel_x3_h = -1; __constant__ int xdim3_advec_mom_kernel_x3; int xdim3_advec_mom_kernel_x3_h = -1; __constant__ int ydim3_advec_mom_kernel_x3; int ydim3_advec_mom_kernel_x3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_x3 * (y) + \ xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_x3 * (y) + \ xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_x3 * (y) + \ xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_x3 * (y) + \ xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3 * (z)) // user function __device__ inline void advec_mom_kernel_x3_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x) { post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)]; pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_x3(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int 
idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_x3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 4, range, 124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124, "advec_mom_kernel_x3"); OPS_kernels[124].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n 
+ 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x3_h || ydim0 != ydim0_advec_mom_kernel_x3_h || xdim1 != xdim1_advec_mom_kernel_x3_h || ydim1 != ydim1_advec_mom_kernel_x3_h || xdim2 != xdim2_advec_mom_kernel_x3_h || ydim2 != ydim2_advec_mom_kernel_x3_h || xdim3 != xdim3_advec_mom_kernel_x3_h || ydim3 != ydim3_advec_mom_kernel_x3_h) { hipMemcpyToSymbol(xdim0_advec_mom_kernel_x3, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_x3_h = xdim0; hipMemcpyToSymbol(ydim0_advec_mom_kernel_x3, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_x3_h = ydim0; hipMemcpyToSymbol(xdim1_advec_mom_kernel_x3, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_x3_h = xdim1; hipMemcpyToSymbol(ydim1_advec_mom_kernel_x3, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_x3_h = ydim1; hipMemcpyToSymbol(xdim2_advec_mom_kernel_x3, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_x3_h = xdim2; hipMemcpyToSymbol(ydim2_advec_mom_kernel_x3, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_x3_h = ydim2; hipMemcpyToSymbol(xdim3_advec_mom_kernel_x3, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_x3_h = xdim3; hipMemcpyToSymbol(ydim3_advec_mom_kernel_x3, &ydim3, sizeof(int)); ydim3_advec_mom_kernel_x3_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 
z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[124].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_mom_kernel_x3), 
dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[124].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[124].mpi_time += t2 - t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 124; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_x3_execute; if (OPS_diags > 1) { ops_timing_realloc(124, "advec_mom_kernel_x3"); } ops_enqueue_kernel(desc); } #endif
6dba64f22cc853daf4af2fc763ed91a3d8ffc03c.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x3; int xdim0_advec_mom_kernel_x3_h = -1; __constant__ int ydim0_advec_mom_kernel_x3; int ydim0_advec_mom_kernel_x3_h = -1; __constant__ int xdim1_advec_mom_kernel_x3; int xdim1_advec_mom_kernel_x3_h = -1; __constant__ int ydim1_advec_mom_kernel_x3; int ydim1_advec_mom_kernel_x3_h = -1; __constant__ int xdim2_advec_mom_kernel_x3; int xdim2_advec_mom_kernel_x3_h = -1; __constant__ int ydim2_advec_mom_kernel_x3; int ydim2_advec_mom_kernel_x3_h = -1; __constant__ int xdim3_advec_mom_kernel_x3; int xdim3_advec_mom_kernel_x3_h = -1; __constant__ int ydim3_advec_mom_kernel_x3; int ydim3_advec_mom_kernel_x3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_x3 * (y) + \ xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_x3 * (y) + \ xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_x3 * (y) + \ xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_x3 * (y) + \ xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3 * (z)) // user function __device__ inline void advec_mom_kernel_x3_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x) { post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)]; pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_x3(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + 
threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x3 + idx_z * 1 * 1 * xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_x3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif // Timing double t1, t2, c1, c2; ops_arg args[4] = {arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 4, range, 124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124, "advec_mom_kernel_x3"); OPS_kernels[124].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = 
sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x3_h || ydim0 != ydim0_advec_mom_kernel_x3_h || xdim1 != xdim1_advec_mom_kernel_x3_h || ydim1 != ydim1_advec_mom_kernel_x3_h || xdim2 != xdim2_advec_mom_kernel_x3_h || ydim2 != ydim2_advec_mom_kernel_x3_h || xdim3 != xdim3_advec_mom_kernel_x3_h || ydim3 != ydim3_advec_mom_kernel_x3_h) { cudaMemcpyToSymbol(xdim0_advec_mom_kernel_x3, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_x3_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_mom_kernel_x3, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_x3_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_mom_kernel_x3, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_x3_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_mom_kernel_x3, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_x3_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_mom_kernel_x3, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_x3_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_mom_kernel_x3, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_x3_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_mom_kernel_x3, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_x3_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_mom_kernel_x3, &ydim3, sizeof(int)); ydim3_advec_mom_kernel_x3_h = ydim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int 
dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args, 4, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[124].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_mom_kernel_x3<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], 
x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[124].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[124].mpi_time += t2 - t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 124; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel_x3_execute; if (OPS_diags > 1) { ops_timing_realloc(124, "advec_mom_kernel_x3"); } ops_enqueue_kernel(desc); } #endif
79618b6e923e472a2dd20e56d406da14a17a548a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_with_handle<float>(hipblasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? 
K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_with_handle<double>(hipblasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_batch<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int strideA, const int strideB, const int strideC,const int batch, const int lda, const int ldb) { // Note that cublas follows fortran order. //int lda = (TransA == CblasNoTrans) ? K : M; //int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, N, strideC, batch)); } template <> void caffe_gpu_gemm_batch<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int strideA, const int strideB, const int strideC,const int batch,const int lda, const int ldb) { // Note that cublas follows fortran order. //int lda = (TransA == CblasNoTrans) ? K : M; //int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,strideB, A, lda,strideA, &beta, C, N,strideC,batch)); } template <> void caffe_gpu_gemm_batch2<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int strideA, const int strideB, const int strideC, const int batch) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N,M,K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batch)); } //note that batchgemm follows the row order not the colum order. 
template <> void caffe_gpu_gemm_batch2<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int strideA, const int strideB, const int strideC, const int batch) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batch)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template 
<> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void clip_kernel(const int n, const Dtype clip, Dtype* x) { CUDA_KERNEL_LOOP(index, n) { if (x[index]>clip) { x[index] = clip; } else if (x[index]<-clip) { x[index] = -clip; } } } template <> void caffe_gpu_clip(const int N, const float clip, float* X) { // NOLINT_NEXT_LINE(whitespace/operators) CHECK_GE(clip, 0); clip_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, clip, X); } template <> void caffe_gpu_clip(const int N, const double clip, double* X) { // NOLINT_NEXT_LINE(whitespace/operators) CHECK_GE(clip, 0); clip_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, clip, X); } template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) 
{ y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, 
const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sqr_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * a[index]; } } template <> void caffe_gpu_sqr<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqr_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <> void caffe_gpu_sqr<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqr_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { 
y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a 
!= static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
79618b6e923e472a2dd20e56d406da14a17a548a.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_with_handle<float>(cublasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_with_handle<double>(cublasHandle_t handle, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm_batch<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int strideA, const int strideB, const int strideC,const int batch, const int lda, const int ldb) { // Note that cublas follows fortran order. //int lda = (TransA == CblasNoTrans) ? K : M; //int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, N, strideC, batch)); } template <> void caffe_gpu_gemm_batch<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int strideA, const int strideB, const int strideC,const int batch,const int lda, const int ldb) { // Note that cublas follows fortran order. //int lda = (TransA == CblasNoTrans) ? K : M; //int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb,strideB, A, lda,strideA, &beta, C, N,strideC,batch)); } template <> void caffe_gpu_gemm_batch2<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int strideA, const int strideB, const int strideC, const int batch) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N,M,K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batch)); } //note that batchgemm follows the row order not the colum order. 
template <> void caffe_gpu_gemm_batch2<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int strideA, const int strideB, const int strideC, const int batch) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemmStridedBatched(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batch)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void 
caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void clip_kernel(const int n, const Dtype clip, Dtype* x) { CUDA_KERNEL_LOOP(index, n) { if (x[index]>clip) { x[index] = clip; } else if (x[index]<-clip) { x[index] = -clip; } } } template <> void caffe_gpu_clip(const int N, const float clip, float* X) { // NOLINT_NEXT_LINE(whitespace/operators) CHECK_GE(clip, 0); clip_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, clip, X); } template <> void caffe_gpu_clip(const int N, const double clip, double* X) { // NOLINT_NEXT_LINE(whitespace/operators) CHECK_GE(clip, 0); clip_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, clip, X); } template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void 
caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sqr_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * a[index]; } } template <> void caffe_gpu_sqr<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqr_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <> void caffe_gpu_sqr<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqr_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
sqrt_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, a, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, 
const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
ba03eae37fd13628966fb1adcf9c3ec043c648b0.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file tensorrt.cu * \brief TensorRT GPU operation registration * \author Marek Kolodziej, Clement Fuji Tsang */ #if MXNET_USE_TENSORRT #include "./tensorrt-inl.h" namespace mxnet { namespace op { #define CHECK_CUDART(x) do { \ hipError_t res = (x); \ if (res != hipSuccess) { \ fprintf(stderr, "CUDART: %s = %d (%s) at (%s:%d)\n", \ #x, res, hipGetErrorString(res), __FILE__, __LINE__); \ exit(1); \ } \ } while (0) void TRTCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; hipStream_t cuda_s = Stream<gpu>::GetStream(ctx.get_stream<gpu>()); const auto& param = state.get_state<TRTEngineParam>(); for (size_t i = 0; i < param.binding_order->size(); ++i) { auto& p = param.binding_order->at(i); if (p.second == true) { param.bindings->at(i) = inputs[p.first].dptr_; } else { param.bindings->at(i) = outputs[p.first].dptr_; } } const int batch_size = static_cast<int>(inputs[0].shape_[0]); param.trt_executor->enqueue(batch_size, 
param.bindings->data(), cuda_s, nullptr); } NNVM_REGISTER_OP(_TensorRT) .set_attr<FStatefulCompute>("FStatefulCompute<gpu>", TRTCompute); } // namespace op } // namespace mxnet #endif // MXNET_USE_TENSORRT
ba03eae37fd13628966fb1adcf9c3ec043c648b0.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file tensorrt.cu * \brief TensorRT GPU operation registration * \author Marek Kolodziej, Clement Fuji Tsang */ #if MXNET_USE_TENSORRT #include "./tensorrt-inl.h" namespace mxnet { namespace op { #define CHECK_CUDART(x) do { \ cudaError_t res = (x); \ if (res != cudaSuccess) { \ fprintf(stderr, "CUDART: %s = %d (%s) at (%s:%d)\n", \ #x, res, cudaGetErrorString(res), __FILE__, __LINE__); \ exit(1); \ } \ } while (0) void TRTCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; cudaStream_t cuda_s = Stream<gpu>::GetStream(ctx.get_stream<gpu>()); const auto& param = state.get_state<TRTEngineParam>(); for (size_t i = 0; i < param.binding_order->size(); ++i) { auto& p = param.binding_order->at(i); if (p.second == true) { param.bindings->at(i) = inputs[p.first].dptr_; } else { param.bindings->at(i) = outputs[p.first].dptr_; } } const int batch_size = static_cast<int>(inputs[0].shape_[0]); param.trt_executor->enqueue(batch_size, param.bindings->data(), cuda_s, nullptr); } NNVM_REGISTER_OP(_TensorRT) 
.set_attr<FStatefulCompute>("FStatefulCompute<gpu>", TRTCompute); } // namespace op } // namespace mxnet #endif // MXNET_USE_TENSORRT
b45459fe57f83323681c0a751f7d29c3b0e272ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "HexEle.h" //#include "cuprintf.cuh" #include "cuprintf.cu" #include "float3x3.h" #include "float3x3_helper.h" #include "helper_math.h" #include "NeoHookean.h" #include "CGCuda.h" #include "Quadrature.hpp" __constant__ QuadratureCuda quadrature; __constant__ float admmtol = 0.01; ///@brief error tolerance in energy for admm min ele. __constant__ float etol = 1e-3; __constant__ int nLinStep = 20; __constant__ int sw[8][3] = { { -1, -1, -1 }, { -1, -1, 1 }, { -1, 1, -1 }, { -1, 1, 1 }, { 1, -1, -1 }, { 1, -1, 1 }, { 1, 1, -1 }, { 1, 1, 1 } }; __device__ void Stiffness(const float3 & p, const float3 *XX, const float3 * xx, float weight, float K[][KSIZE]); ///@param XX an array of 8 float3 s. Ordered z-> y ->x __device__ float3 ShapeFunGrad(int ii, const float3 & xx, const float3 * XX) { float3 size = 4 * (XX[7] - XX[0]); float3 grad; size.x = 1.0f / (size.x); size.y = 1.0f / (size.y); size.z = 1.0f / (size.z); grad.x = size.x * sw[ii][0] * (1 + sw[ii][1] * xx.y) * (1 + sw[ii][2] * xx.z); grad.y = size.y * sw[ii][1] * (1 + sw[ii][0] * xx.x) * (1 + sw[ii][2] * xx.z); grad.z = size.z * sw[ii][2] * (1 + sw[ii][0] * xx.x) * (1 + sw[ii][1] * xx.y); return grad; } __device__ void GetDefGrad(const float3 * XX, const float3 * xx, float3 p, float3x3& F) { F.setIdentity(); for (int ii = 0; ii<NVERT; ii++){ float3 grad = ShapeFunGrad(ii, p, XX); float3x3 outer; outer.outerProd(xx[ii] - XX[ii], grad); F += outer; } } ///@return -1 if inverted. Energy is assumed to be non-negative. 
__device__ float GetEnergy(const float3 *XX, const float3 * xx) { float energy = 0; for (int ii = 0; ii<quadrature.N; ii++){ float3x3 F; GetDefGrad(XX, xx, quadrature.P[ii], F); float ee = StrainEnergy(F); if (ee <= -1){ //for(int ii = 0;ii<9;ii++){ // cuPrintf("%.7f ", F.m[ii]); //} return -1; } energy += quadrature.W[ii] * ee; } float3 size = XX[7] - XX[0]; float vol = size.x * size.y * size.z; return vol*energy; } __device__ void ShapeFun(const float3 & p, float * ww) { for (int ii = 0; ii<NVERT; ii++){ ww[ii] = (1.0 / 8) * (1 + sw[ii][0] * p.x) *(1 + sw[ii][1] * p.y) *(1 + sw[ii][2] * p.z); } } __device__ void GetForce(const float3 *XX, const float3 * xx, float3 * ff) { float3x3 F; float3 size = XX[7] - XX[0]; float Nshape[NVERT]; float vol = size.x * size.y * size.z; for (int ii = 0; ii<NVERT; ii++){ ff[ii].x = 0; ff[ii].y = 0; ff[ii].z = 0; } for (int jj = 0; jj<quadrature.N; jj++){ ShapeFun(quadrature.P[jj], Nshape); GetDefGrad(XX, xx, quadrature.P[jj], F); float3x3 PP; PK1(F, PP); for (int ii = 0; ii<NVERT; ii++){ float3 gradN = ShapeFunGrad(ii, quadrature.P[jj], XX); ff[ii] -= vol* quadrature.W[jj] * PP * gradN;; } } } __device__ void Stiffness(const float3 * XX, const float3 * xx, float K[][KSIZE]) { float3 size = XX[7] - XX[0]; float vol = size.x*size.y*size.z; for (int ii = 0; ii<KSIZE; ii++){ for (int jj = 0; jj<KSIZE; jj++){ K[ii][jj] = 0; } } for (int ii = 0; ii<quadrature.N; ii++){ Stiffness(quadrature.P[ii], XX, xx, quadrature.W[ii] * vol, K); } } ///@param p quadrature point in natural coordinates. ///@param weight Quadrature weight at point p. 
///@TODO probably there is a more efficient calculation __device__ void Stiffness(const float3 & p, const float3 *XX, const float3 * xx, float weight, float K[][KSIZE]) { float3 dN[8]; float3x3 F; float3x3 dF; GetDefGrad(XX, xx, p, F); for (int ii = 0; ii<NVERT; ii++){ dN[ii] = ShapeFunGrad(ii, p, XX); } for (int ii = 0; ii<8; ii++){ for (int jj = 0; jj<3; jj++){ //int col = 3*at[ii]+jj; int col = 3 * ii + jj; dF.setZero(); dF.m[3 * jj] = dN[ii].x; dF.m[3 * jj + 1] = dN[ii].y; dF.m[3 * jj + 2] = dN[ii].z; float3x3 dP; dPdx(F, dF, dP); for (int vv = 0; vv<8; vv++){ float3 dfdxi = dP*dN[vv]; //K[3*at[vv]][col] += weight*dfdxi.x; K[3 * vv][col] += weight*dfdxi.x; K[3 * vv + 1][col] += weight*dfdxi.y; K[3 * vv + 2][col] += weight*dfdxi.z; } } } } __device__ float admmEnergy(const float3 * XX, const float3 * xx, const ADMMInfo & admm) { float E = GetEnergy(XX, xx); for (int ii = 0; ii<NVERT; ii++){ float3 diff = xx[ii] - admm.Z[ii]; E += 0.5 * admm.ro * dot(diff, diff); E += dot(admm.y[ii], diff); } return E; } __device__ void admmForce(const float3 * XX, const float3 * xx, const ADMMInfo & admm, float3 * ff) { GetForce(XX, xx, ff); for (int ii = 0; ii<NVERT; ii++){ ff[ii] -= admm.ro * (xx[ii] - admm.Z[ii]); ff[ii] -= admm.y[ii]; } } __device__ void admmStiffness(const float3 * XX, const float3 * xx, const ADMMInfo & admm, float K[][KSIZE]) { Stiffness(XX, xx, K); for (int ii = 0; ii<KSIZE; ii++){ K[ii][ii] += admm.ro; } } __device__ void arrayCpy(float3 * dst, const float3 * src, int N) { for (int ii = 0; ii<N; ii++){ dst[ii] = src[ii]; } } __device__ void addMult(float3 * dst, const float3 * dx, float hh, int N) { for (int ii = 0; ii<N; ii++){ dst[ii] += hh*dx[ii]; } } __device__ void admmMinEle(const float3 * XX, float3 * xx, const ADMMInfo & admm) { float E = 0; float hh = 1; int NSteps = 2; float change = 0; float3 ff[NVERT]; float3 dx[NVERT]; float K[KSIZE][KSIZE]; E = admmEnergy(XX, xx, admm); for (int ii = 0; ii<NSteps; ii++){ // cuPrintf("%d %.3f\n",ii, E); 
admmForce(XX, xx, admm, ff); for (int jj = 0; jj<NVERT; jj++){ dx[jj].x = 0; dx[jj].y = 0; dx[jj].z = 0; } admmStiffness(XX, xx, admm, K); CG((const float*)K, (const float*)ff, (float*)dx); for (int jj = 0; jj<NVERT; jj++){ change += abs(dx[jj].x); change += abs(dx[jj].y); change += abs(dx[jj].z); } //cuPrintf("f: %.4f\n", ff[0].x); //cuPrintf("dx %.4f\n", K[0][0]); //cuPrintf("dx %.4f\n", dx[0].x); //simple line search //ff=x0 x in previous iteration arrayCpy(ff, xx, NVERT); for (int jj = 0; jj<nLinStep; jj++){ if (change * hh < etol){ return; } addMult(xx, dx, hh, NVERT); float ene = admmEnergy(XX, xx, admm); //cuPrintf("%d %d\n", ii, jj); if (ene <= -1 || ene >= E){ //cuPrintf("%.6f %0.6f\n", ene, E); //cuPrintf("%.4f\n", hh); hh *= 0.5; arrayCpy(xx, ff, NVERT); } else{ if (abs(ene - E)<etol){ return; } E = ene; break; } } } } __global__ void admmMinTest(const float3 * XX, float3 * xx, const ADMMInfo * admm) { int idx = blockIdx.x*blockDim.x + threadIdx.x; cuPrintfRestrict(0, 0); admmMinEle(XX, &(xx[NVERT*idx]), *admm); } __global__ void admmMinEleDup(const float3 * XX, float3 * xx, const ADMMInfo * admm) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //cuPrintfRestrict(0,0); ADMMInfo admmLoc = admm[idx]; admmMinEle(XX, &(xx[NVERT*idx]), admmLoc); } __global__ void GetEnergy(const float3 * XX, const ADMMInfo * admm, float * E) { int idx = blockIdx.x*blockDim.x + threadIdx.x; E[idx] = GetEnergy(XX, admm[idx].Z); } __global__ void GetIntForce(const float3 * XX, const ADMMInfo * admm, float3 * ff) { int idx = blockIdx.x*blockDim.x + threadIdx.x; GetForce(XX, admm[idx].Z, &(ff[NVERT*idx])); } __global__ void HexStiffnessTest(const float3 * XX, const float3 * xx, KMat * Ks, float3 * ff, float * E) { cuPrintfRestrict(0, 0); int idx = blockIdx.x*blockDim.x + threadIdx.x; Stiffness(XX, xx, Ks[idx].K); E[idx] = GetEnergy(XX, xx); float3x3 F; float3x3 P; float3 p = make_float3(-0.5, -0.5, -0.5); GetDefGrad(XX, xx, p, F); GetForce(XX, xx, &(ff[8 * idx])); float3x3 dF; 
dF.setZero(); float3 grad = ShapeFunGrad(6, p, XX); dF.m[6] = grad.x; dF.m[7] = grad.y; dF.m[8] = grad.z; dPdx(F, dF, P); for (int ii = 0; ii<3; ii++){ ff[8 * idx + ii].x = P.m[3 * ii]; ff[8 * idx + ii].y = P.m[3 * ii + 1]; ff[8 * idx + ii].z = P.m[3 * ii + 2]; //ff[8*idx + ii].x = F.m[3*ii]; //ff[8*idx + ii].y = F.m[3*ii+1]; //ff[8*idx + ii].z = F.m[3*ii+2]; } } __global__ void HexTest(float3 * xx) { int idx = blockIdx.x*blockDim.x + threadIdx.x; for (int ii = 0; ii<quadrature.N; ii++){ xx[idx * 8 + ii].x = sw[ii][0]; } } void initHexEle() { QuadratureCuda qq; Quadrature quad=Quadrature::Gauss2; qq.N = 8; for (int ii = 0; ii<qq.N; ii++){ qq.P[ii] = make_float3(quad.x[ii][0], quad.x[ii][1], quad.x[ii][2]); qq.W[ii] = quad.w[ii]; } hipMemcpyToSymbol(quadrature, &qq, sizeof(qq)); }
b45459fe57f83323681c0a751f7d29c3b0e272ba.cu
#include "device_launch_parameters.h" #include "HexEle.h" //#include "cuprintf.cuh" #include "cuprintf.cu" #include "float3x3.h" #include "float3x3_helper.h" #include "helper_math.h" #include "NeoHookean.h" #include "CGCuda.h" #include "Quadrature.hpp" __constant__ QuadratureCuda quadrature; __constant__ float admmtol = 0.01; ///@brief error tolerance in energy for admm min ele. __constant__ float etol = 1e-3; __constant__ int nLinStep = 20; __constant__ int sw[8][3] = { { -1, -1, -1 }, { -1, -1, 1 }, { -1, 1, -1 }, { -1, 1, 1 }, { 1, -1, -1 }, { 1, -1, 1 }, { 1, 1, -1 }, { 1, 1, 1 } }; __device__ void Stiffness(const float3 & p, const float3 *XX, const float3 * xx, float weight, float K[][KSIZE]); ///@param XX an array of 8 float3 s. Ordered z-> y ->x __device__ float3 ShapeFunGrad(int ii, const float3 & xx, const float3 * XX) { float3 size = 4 * (XX[7] - XX[0]); float3 grad; size.x = 1.0f / (size.x); size.y = 1.0f / (size.y); size.z = 1.0f / (size.z); grad.x = size.x * sw[ii][0] * (1 + sw[ii][1] * xx.y) * (1 + sw[ii][2] * xx.z); grad.y = size.y * sw[ii][1] * (1 + sw[ii][0] * xx.x) * (1 + sw[ii][2] * xx.z); grad.z = size.z * sw[ii][2] * (1 + sw[ii][0] * xx.x) * (1 + sw[ii][1] * xx.y); return grad; } __device__ void GetDefGrad(const float3 * XX, const float3 * xx, float3 p, float3x3& F) { F.setIdentity(); for (int ii = 0; ii<NVERT; ii++){ float3 grad = ShapeFunGrad(ii, p, XX); float3x3 outer; outer.outerProd(xx[ii] - XX[ii], grad); F += outer; } } ///@return -1 if inverted. Energy is assumed to be non-negative. 
__device__ float GetEnergy(const float3 *XX, const float3 * xx) { float energy = 0; for (int ii = 0; ii<quadrature.N; ii++){ float3x3 F; GetDefGrad(XX, xx, quadrature.P[ii], F); float ee = StrainEnergy(F); if (ee <= -1){ //for(int ii = 0;ii<9;ii++){ // cuPrintf("%.7f ", F.m[ii]); //} return -1; } energy += quadrature.W[ii] * ee; } float3 size = XX[7] - XX[0]; float vol = size.x * size.y * size.z; return vol*energy; } __device__ void ShapeFun(const float3 & p, float * ww) { for (int ii = 0; ii<NVERT; ii++){ ww[ii] = (1.0 / 8) * (1 + sw[ii][0] * p.x) *(1 + sw[ii][1] * p.y) *(1 + sw[ii][2] * p.z); } } __device__ void GetForce(const float3 *XX, const float3 * xx, float3 * ff) { float3x3 F; float3 size = XX[7] - XX[0]; float Nshape[NVERT]; float vol = size.x * size.y * size.z; for (int ii = 0; ii<NVERT; ii++){ ff[ii].x = 0; ff[ii].y = 0; ff[ii].z = 0; } for (int jj = 0; jj<quadrature.N; jj++){ ShapeFun(quadrature.P[jj], Nshape); GetDefGrad(XX, xx, quadrature.P[jj], F); float3x3 PP; PK1(F, PP); for (int ii = 0; ii<NVERT; ii++){ float3 gradN = ShapeFunGrad(ii, quadrature.P[jj], XX); ff[ii] -= vol* quadrature.W[jj] * PP * gradN;; } } } __device__ void Stiffness(const float3 * XX, const float3 * xx, float K[][KSIZE]) { float3 size = XX[7] - XX[0]; float vol = size.x*size.y*size.z; for (int ii = 0; ii<KSIZE; ii++){ for (int jj = 0; jj<KSIZE; jj++){ K[ii][jj] = 0; } } for (int ii = 0; ii<quadrature.N; ii++){ Stiffness(quadrature.P[ii], XX, xx, quadrature.W[ii] * vol, K); } } ///@param p quadrature point in natural coordinates. ///@param weight Quadrature weight at point p. 
///@TODO probably there is a more efficient calculation __device__ void Stiffness(const float3 & p, const float3 *XX, const float3 * xx, float weight, float K[][KSIZE]) { float3 dN[8]; float3x3 F; float3x3 dF; GetDefGrad(XX, xx, p, F); for (int ii = 0; ii<NVERT; ii++){ dN[ii] = ShapeFunGrad(ii, p, XX); } for (int ii = 0; ii<8; ii++){ for (int jj = 0; jj<3; jj++){ //int col = 3*at[ii]+jj; int col = 3 * ii + jj; dF.setZero(); dF.m[3 * jj] = dN[ii].x; dF.m[3 * jj + 1] = dN[ii].y; dF.m[3 * jj + 2] = dN[ii].z; float3x3 dP; dPdx(F, dF, dP); for (int vv = 0; vv<8; vv++){ float3 dfdxi = dP*dN[vv]; //K[3*at[vv]][col] += weight*dfdxi.x; K[3 * vv][col] += weight*dfdxi.x; K[3 * vv + 1][col] += weight*dfdxi.y; K[3 * vv + 2][col] += weight*dfdxi.z; } } } } __device__ float admmEnergy(const float3 * XX, const float3 * xx, const ADMMInfo & admm) { float E = GetEnergy(XX, xx); for (int ii = 0; ii<NVERT; ii++){ float3 diff = xx[ii] - admm.Z[ii]; E += 0.5 * admm.ro * dot(diff, diff); E += dot(admm.y[ii], diff); } return E; } __device__ void admmForce(const float3 * XX, const float3 * xx, const ADMMInfo & admm, float3 * ff) { GetForce(XX, xx, ff); for (int ii = 0; ii<NVERT; ii++){ ff[ii] -= admm.ro * (xx[ii] - admm.Z[ii]); ff[ii] -= admm.y[ii]; } } __device__ void admmStiffness(const float3 * XX, const float3 * xx, const ADMMInfo & admm, float K[][KSIZE]) { Stiffness(XX, xx, K); for (int ii = 0; ii<KSIZE; ii++){ K[ii][ii] += admm.ro; } } __device__ void arrayCpy(float3 * dst, const float3 * src, int N) { for (int ii = 0; ii<N; ii++){ dst[ii] = src[ii]; } } __device__ void addMult(float3 * dst, const float3 * dx, float hh, int N) { for (int ii = 0; ii<N; ii++){ dst[ii] += hh*dx[ii]; } } __device__ void admmMinEle(const float3 * XX, float3 * xx, const ADMMInfo & admm) { float E = 0; float hh = 1; int NSteps = 2; float change = 0; float3 ff[NVERT]; float3 dx[NVERT]; float K[KSIZE][KSIZE]; E = admmEnergy(XX, xx, admm); for (int ii = 0; ii<NSteps; ii++){ // cuPrintf("%d %.3f\n",ii, E); 
admmForce(XX, xx, admm, ff); for (int jj = 0; jj<NVERT; jj++){ dx[jj].x = 0; dx[jj].y = 0; dx[jj].z = 0; } admmStiffness(XX, xx, admm, K); CG((const float*)K, (const float*)ff, (float*)dx); for (int jj = 0; jj<NVERT; jj++){ change += abs(dx[jj].x); change += abs(dx[jj].y); change += abs(dx[jj].z); } //cuPrintf("f: %.4f\n", ff[0].x); //cuPrintf("dx %.4f\n", K[0][0]); //cuPrintf("dx %.4f\n", dx[0].x); //simple line search //ff=x0 x in previous iteration arrayCpy(ff, xx, NVERT); for (int jj = 0; jj<nLinStep; jj++){ if (change * hh < etol){ return; } addMult(xx, dx, hh, NVERT); float ene = admmEnergy(XX, xx, admm); //cuPrintf("%d %d\n", ii, jj); if (ene <= -1 || ene >= E){ //cuPrintf("%.6f %0.6f\n", ene, E); //cuPrintf("%.4f\n", hh); hh *= 0.5; arrayCpy(xx, ff, NVERT); } else{ if (abs(ene - E)<etol){ return; } E = ene; break; } } } } __global__ void admmMinTest(const float3 * XX, float3 * xx, const ADMMInfo * admm) { int idx = blockIdx.x*blockDim.x + threadIdx.x; cuPrintfRestrict(0, 0); admmMinEle(XX, &(xx[NVERT*idx]), *admm); } __global__ void admmMinEleDup(const float3 * XX, float3 * xx, const ADMMInfo * admm) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //cuPrintfRestrict(0,0); ADMMInfo admmLoc = admm[idx]; admmMinEle(XX, &(xx[NVERT*idx]), admmLoc); } __global__ void GetEnergy(const float3 * XX, const ADMMInfo * admm, float * E) { int idx = blockIdx.x*blockDim.x + threadIdx.x; E[idx] = GetEnergy(XX, admm[idx].Z); } __global__ void GetIntForce(const float3 * XX, const ADMMInfo * admm, float3 * ff) { int idx = blockIdx.x*blockDim.x + threadIdx.x; GetForce(XX, admm[idx].Z, &(ff[NVERT*idx])); } __global__ void HexStiffnessTest(const float3 * XX, const float3 * xx, KMat * Ks, float3 * ff, float * E) { cuPrintfRestrict(0, 0); int idx = blockIdx.x*blockDim.x + threadIdx.x; Stiffness(XX, xx, Ks[idx].K); E[idx] = GetEnergy(XX, xx); float3x3 F; float3x3 P; float3 p = make_float3(-0.5, -0.5, -0.5); GetDefGrad(XX, xx, p, F); GetForce(XX, xx, &(ff[8 * idx])); float3x3 dF; 
dF.setZero(); float3 grad = ShapeFunGrad(6, p, XX); dF.m[6] = grad.x; dF.m[7] = grad.y; dF.m[8] = grad.z; dPdx(F, dF, P); for (int ii = 0; ii<3; ii++){ ff[8 * idx + ii].x = P.m[3 * ii]; ff[8 * idx + ii].y = P.m[3 * ii + 1]; ff[8 * idx + ii].z = P.m[3 * ii + 2]; //ff[8*idx + ii].x = F.m[3*ii]; //ff[8*idx + ii].y = F.m[3*ii+1]; //ff[8*idx + ii].z = F.m[3*ii+2]; } } __global__ void HexTest(float3 * xx) { int idx = blockIdx.x*blockDim.x + threadIdx.x; for (int ii = 0; ii<quadrature.N; ii++){ xx[idx * 8 + ii].x = sw[ii][0]; } } void initHexEle() { QuadratureCuda qq; Quadrature quad=Quadrature::Gauss2; qq.N = 8; for (int ii = 0; ii<qq.N; ii++){ qq.P[ii] = make_float3(quad.x[ii][0], quad.x[ii][1], quad.x[ii][2]); qq.W[ii] = quad.w[ii]; } cudaMemcpyToSymbol(quadrature, &qq, sizeof(qq)); }
90fdf96602039562f7ad6da3cfaecaf901f1e029.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_structs.h" #include "cuda_export_headers.h" struct geometry_local { int gridSize; int blockSize; }; template<typename T> geometry_local find_suitable_geometry_local(T func, uint shared_memory_used, uint32_t smCount) { int gridSize; int blockSize; int maxActiveBlocks; hipOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, func, shared_memory_used, 0); hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, func, blockSize, shared_memory_used); gridSize = maxActiveBlocks * smCount; return geometry_local{gridSize, blockSize}; } __global__ void field_add_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] + b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void field_sub_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] - b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void field_mul_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] * b_arr[tid]); tid += blockDim.x * gridDim.x; } } using field_kernel_t = __global__ void(const embedded_field*, const embedded_field*, embedded_field*, size_t); void field_func_invoke(const embedded_field* a_host_arr, const embedded_field* b_host_arr, embedded_field* c_host_arr, uint32_t arr_len, field_kernel_t func) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); uint32_t smCount = prop.multiProcessorCount; geometry_local geometry = find_suitable_geometry_local(func, 0, smCount); embedded_field* a_dev_arr = nullptr; embedded_field* b_dev_arr = 
nullptr; embedded_field* c_dev_arr = nullptr; hipMalloc((void **)&a_dev_arr, arr_len * sizeof(embedded_field)); hipMalloc((void **)&b_dev_arr, arr_len * sizeof(embedded_field)); hipMalloc((void **)&c_dev_arr, arr_len * sizeof(embedded_field)); hipMemcpy(a_dev_arr, a_host_arr, arr_len * sizeof(embedded_field), hipMemcpyHostToDevice); hipMemcpy(b_dev_arr, b_host_arr, arr_len * sizeof(embedded_field), hipMemcpyHostToDevice); (hipLaunchKernelGGL((*func)), dim3(geometry.gridSize), dim3(geometry.blockSize), 0, 0, a_dev_arr, b_dev_arr, c_dev_arr, arr_len); hipMemcpy(c_host_arr, c_dev_arr, arr_len * sizeof(embedded_field), hipMemcpyDeviceToHost); hipFree(a_dev_arr); hipFree(b_dev_arr); hipFree(c_dev_arr); } void field_add(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_add_kernel); } void field_sub(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_sub_kernel); } void field_mul(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_mul_kernel); } __global__ void ec_add_kernel(const ec_point* a_arr, const ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = ECC_ADD(a_arr[tid], b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void ec_sub_kernel(const ec_point* a_arr, const ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = ECC_SUB(a_arr[tid], b_arr[tid]); tid += blockDim.x * gridDim.x; } } using ec_kernel_t = __global__ void(const ec_point*, const ec_point*, ec_point*, size_t); void ec_func_invoke(const ec_point* a_host_arr, const ec_point* b_host_arr, ec_point* c_host_arr, 
uint32_t arr_len, ec_kernel_t func) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); uint32_t smCount = prop.multiProcessorCount; geometry_local geometry = find_suitable_geometry_local(func, 0, smCount); ec_point* a_dev_arr = nullptr; ec_point* b_dev_arr = nullptr; ec_point* c_dev_arr = nullptr; hipMalloc((void **)&a_dev_arr, arr_len * sizeof(ec_point)); hipMalloc((void **)&b_dev_arr, arr_len * sizeof(ec_point)); hipMalloc((void **)&c_dev_arr, arr_len * sizeof(ec_point)); hipMemcpy(a_dev_arr, a_host_arr, arr_len * sizeof(ec_point), hipMemcpyHostToDevice); hipMemcpy(b_dev_arr, b_host_arr, arr_len * sizeof(ec_point), hipMemcpyHostToDevice); (hipLaunchKernelGGL((*func)), dim3(geometry.gridSize), dim3(geometry.blockSize), 0, 0, a_dev_arr, b_dev_arr, c_dev_arr, arr_len); hipMemcpy(c_host_arr, c_dev_arr, arr_len * sizeof(ec_point), hipMemcpyDeviceToHost); hipFree(a_dev_arr); hipFree(b_dev_arr); hipFree(c_dev_arr); } void ec_point_add(ec_point* a_arr, ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { ec_func_invoke(a_arr, b_arr, c_arr, arr_len, ec_add_kernel); } void ec_point_sub(ec_point* a_arr, ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { ec_func_invoke(a_arr, b_arr, c_arr, arr_len, ec_sub_kernel); } //----------------------------------------------------------------------------------------------------------------------------------------------- //Multiexponentiation (based on Pippenger realization) //----------------------------------------------------------------------------------------------------------------------------------------------- void large_Pippenger_driver(affine_point*, uint256_g*, ec_point*, size_t); ec_point ec_multiexp(affine_point* points, uint256_g* powers, uint32_t arr_len) { affine_point* dev_points = nullptr; uint256_g* dev_powers = nullptr; ec_point* dev_res = nullptr; ec_point res; hipMalloc((void **)&dev_points, arr_len * sizeof(affine_point)); hipMalloc((void **)&dev_powers, arr_len * sizeof(uint256_g)); hipMalloc((void 
**)&dev_res, ec_point); hipMemcpy(dev_points, points, arr_len * sizeof(affine_point), hipMemcpyHostToDevice); hipMemcpy(dev_powers, powers, arr_len * sizeof(uint256_g), hipMemcpyHostToDevice); large_Pippenger_driver(dev_points, dev_powers, dev_res, arr_len); hipMemcpy(&res, dev_res, sizeof(ec_point), hipMemcpyDeviceToHost); hipFree(dev_points); hipFree(dev_powers); hipFree(dev_res); return res; } //----------------------------------------------------------------------------------------------------------------------------------------------- //FFT routines //----------------------------------------------------------------------------------------------------------------------------------------------- void naive_fft_driver(embedded_field*, embedded_field*, uint32_t, bool); void mult_by_const(embedded_field* arr, __constant__ embedded_field& elem) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { arr[tid] *= elem; tid += blockDim.x * gridDim.x; } } void FFT_invoke(embedded_field* input_arr, embedded_field* output_arr, uint32_t arr_len, bool is_inverse, embedded_field* inversed = nullptr) { embedded_field* dev_input_arr = nullptr; embedded_field* dev_output_arr = nullptr; hipMalloc((void **)&dev_input_arr, arr_len * sizeof(embedded_field)); hipMalloc((void **)&dev_output_arr, arr_len * sizeof(embedded_field)); hipMemcpy(dev_input_arr, input_arr, arr_len * sizeof(embedded_field), hipMemcpyHostToDevice); naive_fft_driver(dev_input_arr, dev_output_arr, arr_len, is_inverse); if (is_inverse) { __constant__ embedded_field dev_temp; hipMemcpyToSymbol(dev_temp, inversed, sizeof(embedded_field)); mult_by_const(output_arr, dev_temp); } hipMemcpy(output_arr, dev_output_arr, arr_len * sizeof(embedded_field), hipMemcpyDeviceToHost); hipFree(dev_input_arr); hipFree(dev_output_arr); } void EXPORT FFT(embedded_field* input_arr, embedded_field* output_arr, uint32_t arr_len) { FFT_invoke(input_arr, output_arr, arr_len, false); } void EXPORT iFFT(embedded_field* 
input_arr, embedded_field* output_arr, uint32_t arr_len, const embedded_field& n_inv) { FFT_invoke(input_arr, output_arr, arr_len, true, &n_inv); } //------------------------------------------------------------------------------------------------------------------------------------------------ //polynomial arithmetic //------------------------------------------------------------------------------------------------------------------------------------------------ // polynomial _polynomial_multiplication_on_fft(const polynomial&, const polynomial&); // polynomial EXPORT poly_add(const& polynomial, const& polynomial); // polynomial EXPORT poly_sub(const& polynomial, const& polynomial); // polynomial EXPORT poly_mul(const& polynomial, const& polynomial);
90fdf96602039562f7ad6da3cfaecaf901f1e029.cu
#include "cuda_structs.h" #include "cuda_export_headers.h" struct geometry_local { int gridSize; int blockSize; }; template<typename T> geometry_local find_suitable_geometry_local(T func, uint shared_memory_used, uint32_t smCount) { int gridSize; int blockSize; int maxActiveBlocks; cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, func, shared_memory_used, 0); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, func, blockSize, shared_memory_used); gridSize = maxActiveBlocks * smCount; return geometry_local{gridSize, blockSize}; } __global__ void field_add_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] + b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void field_sub_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] - b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void field_mul_kernel(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, size_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = a_arr[tid] * b_arr[tid]); tid += blockDim.x * gridDim.x; } } using field_kernel_t = __global__ void(const embedded_field*, const embedded_field*, embedded_field*, size_t); void field_func_invoke(const embedded_field* a_host_arr, const embedded_field* b_host_arr, embedded_field* c_host_arr, uint32_t arr_len, field_kernel_t func) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); uint32_t smCount = prop.multiProcessorCount; geometry_local geometry = find_suitable_geometry_local(func, 0, smCount); embedded_field* a_dev_arr = nullptr; embedded_field* b_dev_arr = nullptr; embedded_field* c_dev_arr = nullptr; cudaMalloc((void **)&a_dev_arr, arr_len 
* sizeof(embedded_field)); cudaMalloc((void **)&b_dev_arr, arr_len * sizeof(embedded_field)); cudaMalloc((void **)&c_dev_arr, arr_len * sizeof(embedded_field)); cudaMemcpy(a_dev_arr, a_host_arr, arr_len * sizeof(embedded_field), cudaMemcpyHostToDevice); cudaMemcpy(b_dev_arr, b_host_arr, arr_len * sizeof(embedded_field), cudaMemcpyHostToDevice); (*func)<<<geometry.gridSize, geometry.blockSize, 0>>>(a_dev_arr, b_dev_arr, c_dev_arr, arr_len); cudaMemcpy(c_host_arr, c_dev_arr, arr_len * sizeof(embedded_field), cudaMemcpyDeviceToHost); cudaFree(a_dev_arr); cudaFree(b_dev_arr); cudaFree(c_dev_arr); } void field_add(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_add_kernel); } void field_sub(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_sub_kernel); } void field_mul(const embedded_field* a_arr, const embedded_field* b_arr, embedded_field* c_arr, uint32_t arr_len) { field_func_invoke(a_arr, b_arr, c_arr, arr_len, field_mul_kernel); } __global__ void ec_add_kernel(const ec_point* a_arr, const ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = ECC_ADD(a_arr[tid], b_arr[tid]); tid += blockDim.x * gridDim.x; } } __global__ void ec_sub_kernel(const ec_point* a_arr, const ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { c_arr[tid] = ECC_SUB(a_arr[tid], b_arr[tid]); tid += blockDim.x * gridDim.x; } } using ec_kernel_t = __global__ void(const ec_point*, const ec_point*, ec_point*, size_t); void ec_func_invoke(const ec_point* a_host_arr, const ec_point* b_host_arr, ec_point* c_host_arr, uint32_t arr_len, ec_kernel_t func) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); uint32_t smCount = 
prop.multiProcessorCount; geometry_local geometry = find_suitable_geometry_local(func, 0, smCount); ec_point* a_dev_arr = nullptr; ec_point* b_dev_arr = nullptr; ec_point* c_dev_arr = nullptr; cudaMalloc((void **)&a_dev_arr, arr_len * sizeof(ec_point)); cudaMalloc((void **)&b_dev_arr, arr_len * sizeof(ec_point)); cudaMalloc((void **)&c_dev_arr, arr_len * sizeof(ec_point)); cudaMemcpy(a_dev_arr, a_host_arr, arr_len * sizeof(ec_point), cudaMemcpyHostToDevice); cudaMemcpy(b_dev_arr, b_host_arr, arr_len * sizeof(ec_point), cudaMemcpyHostToDevice); (*func)<<<geometry.gridSize, geometry.blockSize, 0>>>(a_dev_arr, b_dev_arr, c_dev_arr, arr_len); cudaMemcpy(c_host_arr, c_dev_arr, arr_len * sizeof(ec_point), cudaMemcpyDeviceToHost); cudaFree(a_dev_arr); cudaFree(b_dev_arr); cudaFree(c_dev_arr); } void ec_point_add(ec_point* a_arr, ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { ec_func_invoke(a_arr, b_arr, c_arr, arr_len, ec_add_kernel); } void ec_point_sub(ec_point* a_arr, ec_point* b_arr, ec_point* c_arr, uint32_t arr_len) { ec_func_invoke(a_arr, b_arr, c_arr, arr_len, ec_sub_kernel); } //----------------------------------------------------------------------------------------------------------------------------------------------- //Multiexponentiation (based on Pippenger realization) //----------------------------------------------------------------------------------------------------------------------------------------------- void large_Pippenger_driver(affine_point*, uint256_g*, ec_point*, size_t); ec_point ec_multiexp(affine_point* points, uint256_g* powers, uint32_t arr_len) { affine_point* dev_points = nullptr; uint256_g* dev_powers = nullptr; ec_point* dev_res = nullptr; ec_point res; cudaMalloc((void **)&dev_points, arr_len * sizeof(affine_point)); cudaMalloc((void **)&dev_powers, arr_len * sizeof(uint256_g)); cudaMalloc((void **)&dev_res, ec_point); cudaMemcpy(dev_points, points, arr_len * sizeof(affine_point), cudaMemcpyHostToDevice); cudaMemcpy(dev_powers, 
powers, arr_len * sizeof(uint256_g), cudaMemcpyHostToDevice); large_Pippenger_driver(dev_points, dev_powers, dev_res, arr_len); cudaMemcpy(&res, dev_res, sizeof(ec_point), cudaMemcpyDeviceToHost); cudaFree(dev_points); cudaFree(dev_powers); cudaFree(dev_res); return res; } //----------------------------------------------------------------------------------------------------------------------------------------------- //FFT routines //----------------------------------------------------------------------------------------------------------------------------------------------- void naive_fft_driver(embedded_field*, embedded_field*, uint32_t, bool); void mult_by_const(embedded_field* arr, __constant__ embedded_field& elem) { size_t tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < arr_len) { arr[tid] *= elem; tid += blockDim.x * gridDim.x; } } void FFT_invoke(embedded_field* input_arr, embedded_field* output_arr, uint32_t arr_len, bool is_inverse, embedded_field* inversed = nullptr) { embedded_field* dev_input_arr = nullptr; embedded_field* dev_output_arr = nullptr; cudaMalloc((void **)&dev_input_arr, arr_len * sizeof(embedded_field)); cudaMalloc((void **)&dev_output_arr, arr_len * sizeof(embedded_field)); cudaMemcpy(dev_input_arr, input_arr, arr_len * sizeof(embedded_field), cudaMemcpyHostToDevice); naive_fft_driver(dev_input_arr, dev_output_arr, arr_len, is_inverse); if (is_inverse) { __constant__ embedded_field dev_temp; cudaMemcpyToSymbol(dev_temp, inversed, sizeof(embedded_field)); mult_by_const(output_arr, dev_temp); } cudaMemcpy(output_arr, dev_output_arr, arr_len * sizeof(embedded_field), cudaMemcpyDeviceToHost); cudaFree(dev_input_arr); cudaFree(dev_output_arr); } void EXPORT FFT(embedded_field* input_arr, embedded_field* output_arr, uint32_t arr_len) { FFT_invoke(input_arr, output_arr, arr_len, false); } void EXPORT iFFT(embedded_field* input_arr, embedded_field* output_arr, uint32_t arr_len, const embedded_field& n_inv) { FFT_invoke(input_arr, 
output_arr, arr_len, true, &n_inv); } //------------------------------------------------------------------------------------------------------------------------------------------------ //polynomial arithmetic //------------------------------------------------------------------------------------------------------------------------------------------------ // polynomial _polynomial_multiplication_on_fft(const polynomial&, const polynomial&); // polynomial EXPORT poly_add(const& polynomial, const& polynomial); // polynomial EXPORT poly_sub(const& polynomial, const& polynomial); // polynomial EXPORT poly_mul(const& polynomial, const& polynomial);
677ce32164b504c05e7eb057ad439a11dceb0e9d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> // I'm hardcoding all the constants // It'll be easier to use them instead of sending them as arguments // over and over again... #define N 100 // Number of atoms #define N2 128 // Nearest power of 2 >=N #define T_NUMBER 9 // (360 - 270) / 10 -> number of different T values #define TN 900 // N * T_NUMBER -> number of different T values times N // It's the size of arrays we have to use #define STEPS 1000 // Number of steps /* macro from: https://gist.github.com/NicholasShatokhin/3769635 */ #define CUDA_CALL(x) do { if((x) != hipSuccess) { \ printf("Error at %s:%d -- %s\n",__FILE__,__LINE__, hipGetErrorString(x)); \ return EXIT_FAILURE;}} while(0) __device__ float Energy(float* positionX, float* positionY, float* positionZ, int i) { int idx = threadIdx.x; // I'm using vector of size being power of 2 // This way reduction is easier to write __shared__ float vec_E[N2]; // Initializing unused values to 0 if (idx < N2 - N) vec_E[N + idx] = 0; float E = 0; float X = positionX[i] - positionX[idx]; float Y = positionY[i] - positionY[idx]; float Z = positionZ[i] - positionZ[idx]; vec_E[idx] = (i != idx ? (1. 
/ (X*X + Y*Y + Z*Z)) : 0); __syncthreads(); for (unsigned int s = N2 / 2; s > 0; s >>= 1) { if (idx < s) { vec_E[idx] += vec_E[idx + s]; } __syncthreads(); } E = vec_E[0]; return E; } // I hope I've used correct functions for random values __device__ float RAND1(hiprandState_t* state) { return (hiprand_uniform(state) - 0.5); } __device__ float RAND0(hiprandState_t* state) { return hiprand_uniform(state); } __device__ void sync_pos(float* positionX, float* positionY, float* positionZ, float* positionNEWX, float* positionNEWY, float* positionNEWZ) { int idx = threadIdx.x; positionX[idx] = positionNEWX[idx]; positionY[idx] = positionNEWY[idx]; positionZ[idx] = positionNEWZ[idx]; } __device__ void newpos(float* positionNEWX, float* positionNEWY, float* positionNEWZ, int i, float size, hiprandState_t *state) { positionNEWX[i] += RAND1(state); if (positionNEWX[i] < 0) positionNEWX[i] = fabsf(positionNEWX[i]); else if (positionNEWX[i] > size) positionNEWX[i] -= size; positionNEWY[i] += RAND1(state); if (positionNEWY[i] < 0) positionNEWY[i] = fabsf(positionNEWY[i]); else if (positionNEWY[i] > size) positionNEWY[i] -= size; positionNEWZ[i] += RAND1(state); if (positionNEWZ[i] < 0) positionNEWZ[i] = fabsf(positionNEWZ[i]); else if (positionNEWZ[i] > size) positionNEWZ[i] -= size; } void pr(float* positionX, float* positionY, float* positionZ) { int i; for (i = 0; i < N; i++) printf("%f %f %f\n", positionX[i], positionY[i], positionZ[i]); printf("\n\n"); } __global__ void simulate(float* positionX, float* positionY, float* positionZ, float* stepY, unsigned long seed) { int idx = threadIdx.x; int offset = blockIdx.x * N; // I don't have to compute the function multiple times __shared__ float size; if (idx == 0) size = cbrtf(N); __shared__ float shrX[N], shrY[N], shrZ[N]; __shared__ float newX[N], newY[N], newZ[N]; __shared__ float rnd; __shared__ float tmpX[N], tmpY[N], tmpZ[N]; int sY = 0; float acc_sY = 0; int T = 270 + offset * 10; float kT = .01/T; float E; hiprandState_t 
state; hiprand_init(seed, idx, 0, &state); // start shrX[idx] = RAND0(&state) * size; shrY[idx] = RAND0(&state) * size; shrZ[idx] = RAND0(&state) * size; sync_pos(newX, newY, newZ, shrX, shrY, shrZ); sync_pos(tmpX, tmpY, tmpZ, shrX, shrY, shrZ); __syncthreads(); // These 2 for loops are a bottleneck // I can't execute them in parallel // The simulation is executed step by step // Atom by atom for (int k = 0; k < STEPS; k++) { // step // I'm calculating new position into temporary variables for all the threads // 0.01s of improvement, much wow newpos(tmpX, tmpY, tmpZ, idx, size, &state); for (int i = 0; i < N; i++) { if (idx == i) { // When i is correct, I move values from tmp to new newX[i] = tmpX[i]; newY[i] = tmpY[i]; newZ[i] = tmpZ[i]; // before: // newpos(newX, newY, newZ, i, size, &state); } __syncthreads(); E = Energy(newX, newY, newZ, i) - Energy(shrX, shrY, shrZ, i); // I'll have to use random value in order to check whether I should make a move // I can't comput RAND0() for each thread, because then the if statement may or may not fail // This approach allows me to make sure the outcome is the same for all the threads if (idx == i) rnd = RAND0(&state); // rnd is shared __syncthreads(); if (E < 0) { sync_pos(shrX, shrY, shrZ, newX, newY, newZ); if (idx == 0) sY++; } else if(rnd < expf(-E/kT)){ sync_pos(shrX, shrY, shrZ, newX, newY, newZ); if (idx == 0) sY++; } __syncthreads(); } acc_sY += sY * 1. 
/ N; sY = 0; } positionX[offset + idx] = shrX[idx]; positionY[offset + idx] = shrY[idx]; positionZ[offset + idx] = shrZ[idx]; if (idx == 0) stepY[blockIdx.x] = acc_sY; } int main() { float *positionX_gpu, *positionY_gpu, *positionZ_gpu, *stepY_gpu; CUDA_CALL(hipMalloc((void**)&positionX_gpu, sizeof(float) * TN)); CUDA_CALL(hipMalloc((void**)&positionY_gpu, sizeof(float) * TN)); CUDA_CALL(hipMalloc((void**)&positionZ_gpu, sizeof(float) * TN)); CUDA_CALL(hipMalloc((void**)&stepY_gpu, sizeof(float) * T_NUMBER)); // block for every T // thread for every atom dim3 dimBlock(N); dim3 dimGrid(T_NUMBER); hipLaunchKernelGGL(( simulate), dim3(dimGrid), dim3(dimBlock), 0, 0, positionX_gpu, positionY_gpu, positionZ_gpu, stepY_gpu, time(NULL)); float* positionX = (float*) malloc(TN * sizeof(float)); float* positionY = (float*) malloc(TN * sizeof(float)); float* positionZ = (float*) malloc(TN * sizeof(float)); float* stepY = (float*) malloc(T_NUMBER * sizeof(float)); CUDA_CALL(hipMemcpy(positionX, positionX_gpu, sizeof(float) * TN, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(positionY, positionY_gpu, sizeof(float) * TN, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(positionZ, positionZ_gpu, sizeof(float) * TN, hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(stepY, stepY_gpu, sizeof(float) * T_NUMBER, hipMemcpyDeviceToHost)); CUDA_CALL(hipFree(positionX_gpu)); CUDA_CALL(hipFree(positionY_gpu)); CUDA_CALL(hipFree(positionZ_gpu)); CUDA_CALL(hipFree(stepY_gpu)); int T; for (int i = 0; i < T_NUMBER; ++i) { T = 270 + i * 10; printf("Stepe ACC %d %f\n", T, stepY[i] * 1./STEPS); } // I'm not using positionX, positionY, and positionZ // But it can be used in some next simulations/operations free(positionX); free(positionY); free(positionZ); free(stepY); return 0; }
677ce32164b504c05e7eb057ad439a11dceb0e9d.cu
#include <cuda_runtime_api.h> #include <cuda.h> #include <curand_kernel.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> // I'm hardcoding all the constants // It'll be easier to use them instead of sending them as arguments // over and over again... #define N 100 // Number of atoms #define N2 128 // Nearest power of 2 >=N #define T_NUMBER 9 // (360 - 270) / 10 -> number of different T values #define TN 900 // N * T_NUMBER -> number of different T values times N // It's the size of arrays we have to use #define STEPS 1000 // Number of steps /* macro from: https://gist.github.com/NicholasShatokhin/3769635 */ #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d -- %s\n",__FILE__,__LINE__, cudaGetErrorString(x)); \ return EXIT_FAILURE;}} while(0) __device__ float Energy(float* positionX, float* positionY, float* positionZ, int i) { int idx = threadIdx.x; // I'm using vector of size being power of 2 // This way reduction is easier to write __shared__ float vec_E[N2]; // Initializing unused values to 0 if (idx < N2 - N) vec_E[N + idx] = 0; float E = 0; float X = positionX[i] - positionX[idx]; float Y = positionY[i] - positionY[idx]; float Z = positionZ[i] - positionZ[idx]; vec_E[idx] = (i != idx ? (1. 
/ (X*X + Y*Y + Z*Z)) : 0); __syncthreads(); for (unsigned int s = N2 / 2; s > 0; s >>= 1) { if (idx < s) { vec_E[idx] += vec_E[idx + s]; } __syncthreads(); } E = vec_E[0]; return E; } // I hope I've used correct functions for random values __device__ float RAND1(curandState* state) { return (curand_uniform(state) - 0.5); } __device__ float RAND0(curandState* state) { return curand_uniform(state); } __device__ void sync_pos(float* positionX, float* positionY, float* positionZ, float* positionNEWX, float* positionNEWY, float* positionNEWZ) { int idx = threadIdx.x; positionX[idx] = positionNEWX[idx]; positionY[idx] = positionNEWY[idx]; positionZ[idx] = positionNEWZ[idx]; } __device__ void newpos(float* positionNEWX, float* positionNEWY, float* positionNEWZ, int i, float size, curandState *state) { positionNEWX[i] += RAND1(state); if (positionNEWX[i] < 0) positionNEWX[i] = fabsf(positionNEWX[i]); else if (positionNEWX[i] > size) positionNEWX[i] -= size; positionNEWY[i] += RAND1(state); if (positionNEWY[i] < 0) positionNEWY[i] = fabsf(positionNEWY[i]); else if (positionNEWY[i] > size) positionNEWY[i] -= size; positionNEWZ[i] += RAND1(state); if (positionNEWZ[i] < 0) positionNEWZ[i] = fabsf(positionNEWZ[i]); else if (positionNEWZ[i] > size) positionNEWZ[i] -= size; } void pr(float* positionX, float* positionY, float* positionZ) { int i; for (i = 0; i < N; i++) printf("%f %f %f\n", positionX[i], positionY[i], positionZ[i]); printf("\n\n"); } __global__ void simulate(float* positionX, float* positionY, float* positionZ, float* stepY, unsigned long seed) { int idx = threadIdx.x; int offset = blockIdx.x * N; // I don't have to compute the function multiple times __shared__ float size; if (idx == 0) size = cbrtf(N); __shared__ float shrX[N], shrY[N], shrZ[N]; __shared__ float newX[N], newY[N], newZ[N]; __shared__ float rnd; __shared__ float tmpX[N], tmpY[N], tmpZ[N]; int sY = 0; float acc_sY = 0; int T = 270 + offset * 10; float kT = .01/T; float E; curandState state; 
curand_init(seed, idx, 0, &state); // start shrX[idx] = RAND0(&state) * size; shrY[idx] = RAND0(&state) * size; shrZ[idx] = RAND0(&state) * size; sync_pos(newX, newY, newZ, shrX, shrY, shrZ); sync_pos(tmpX, tmpY, tmpZ, shrX, shrY, shrZ); __syncthreads(); // These 2 for loops are a bottleneck // I can't execute them in parallel // The simulation is executed step by step // Atom by atom for (int k = 0; k < STEPS; k++) { // step // I'm calculating new position into temporary variables for all the threads // 0.01s of improvement, much wow newpos(tmpX, tmpY, tmpZ, idx, size, &state); for (int i = 0; i < N; i++) { if (idx == i) { // When i is correct, I move values from tmp to new newX[i] = tmpX[i]; newY[i] = tmpY[i]; newZ[i] = tmpZ[i]; // before: // newpos(newX, newY, newZ, i, size, &state); } __syncthreads(); E = Energy(newX, newY, newZ, i) - Energy(shrX, shrY, shrZ, i); // I'll have to use random value in order to check whether I should make a move // I can't comput RAND0() for each thread, because then the if statement may or may not fail // This approach allows me to make sure the outcome is the same for all the threads if (idx == i) rnd = RAND0(&state); // rnd is shared __syncthreads(); if (E < 0) { sync_pos(shrX, shrY, shrZ, newX, newY, newZ); if (idx == 0) sY++; } else if(rnd < expf(-E/kT)){ sync_pos(shrX, shrY, shrZ, newX, newY, newZ); if (idx == 0) sY++; } __syncthreads(); } acc_sY += sY * 1. 
/ N; sY = 0; } positionX[offset + idx] = shrX[idx]; positionY[offset + idx] = shrY[idx]; positionZ[offset + idx] = shrZ[idx]; if (idx == 0) stepY[blockIdx.x] = acc_sY; } int main() { float *positionX_gpu, *positionY_gpu, *positionZ_gpu, *stepY_gpu; CUDA_CALL(cudaMalloc((void**)&positionX_gpu, sizeof(float) * TN)); CUDA_CALL(cudaMalloc((void**)&positionY_gpu, sizeof(float) * TN)); CUDA_CALL(cudaMalloc((void**)&positionZ_gpu, sizeof(float) * TN)); CUDA_CALL(cudaMalloc((void**)&stepY_gpu, sizeof(float) * T_NUMBER)); // block for every T // thread for every atom dim3 dimBlock(N); dim3 dimGrid(T_NUMBER); simulate<<<dimGrid, dimBlock>>>(positionX_gpu, positionY_gpu, positionZ_gpu, stepY_gpu, time(NULL)); float* positionX = (float*) malloc(TN * sizeof(float)); float* positionY = (float*) malloc(TN * sizeof(float)); float* positionZ = (float*) malloc(TN * sizeof(float)); float* stepY = (float*) malloc(T_NUMBER * sizeof(float)); CUDA_CALL(cudaMemcpy(positionX, positionX_gpu, sizeof(float) * TN, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(positionY, positionY_gpu, sizeof(float) * TN, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(positionZ, positionZ_gpu, sizeof(float) * TN, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(stepY, stepY_gpu, sizeof(float) * T_NUMBER, cudaMemcpyDeviceToHost)); CUDA_CALL(cudaFree(positionX_gpu)); CUDA_CALL(cudaFree(positionY_gpu)); CUDA_CALL(cudaFree(positionZ_gpu)); CUDA_CALL(cudaFree(stepY_gpu)); int T; for (int i = 0; i < T_NUMBER; ++i) { T = 270 + i * 10; printf("Stepe ACC %d %f\n", T, stepY[i] * 1./STEPS); } // I'm not using positionX, positionY, and positionZ // But it can be used in some next simulations/operations free(positionX); free(positionY); free(positionZ); free(stepY); return 0; }
0f686258379affa230e777d3a3dcc06a0a0ae904.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/indexreduce.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <loops/broadcasting.h> #include <loops/broadcasting_bool.h> #include <loops/scalar.h> #include <loops/scalar_bool.h> #include <loops/pairwise_transform.h> #include <loops/pairwise_bool.h> #include <loops/transform_same.h> #include <loops/transform_float.h> #include <loops/transform_strict.h> #include <loops/transform_bool.h> #include <loops/transform_any.h> #include <loops/reduce_float.h> #include <loops/reduce_same.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> //#include <thread> #include <map> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include 
<ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> #include <graph/exceptions/datatype_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> #include <hiprand/hiprand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; /** * This is utility kernel, that updates given special buffer with proper values in device memory */ extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) { Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid > 0) return; dimension[0] = 0; maxDimension[0] = 1; specialPointer[0] = 2; specialPointer[1] = rows; specialPointer[2] = 1; specialPointer[3] = 1; specialPointer[4] = 1; specialPointer[5] = 0; specialPointer[6] = 1; specialPointer[7] = 99; ArrayOptions::setDataType(specialPointer, dataType); //printf("special[0]: [%lld]\n", (long long) specialPointer[0]); //shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer); } // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){ SyncInfo *sync = 
reinterpret_cast<SyncInfo *>(data); //printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jLong n,hipFuncAttributes attributes, hipDeviceProp_t properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */

// Returns the shared-memory budget (in bytes) this code is willing to plan
// against for the given device, scaled up by the overflow ratio mentioned in
// the comment above. The per-compute-capability base values correspond to
// per-SM shared memory capacities of those GPU generations.
int getDeviceSharedThreshold(int deviceId) {
    int ccMinor = deviceProperties[deviceId].minor;
    int ccMajor = deviceProperties[deviceId].major;

    // please note threshold isn't multiple of 32, and that's NOT a mistake
    int shmemThreshold;
    if (ccMajor == 6 && ccMinor == 0)
        shmemThreshold = 65536;
    else if (ccMajor == 6 && ccMinor == 1)
        shmemThreshold = 49152;
    else if (ccMajor == 5 && ccMinor == 2)
        shmemThreshold = 98304;
    else if (ccMajor == 5)
        shmemThreshold = 65536;
    else if (ccMajor == 3 && ccMinor == 7)
        shmemThreshold = 114688;
    else
        shmemThreshold = 49152;

    // NOTE(review): int / 0.3 promotes to double (~3.33x the base value) and
    // truncates back to int on return — presumably the intended "overflow
    // ratio" scaling, but worth confirming it shouldn't be a multiply by 0.3.
    return shmemThreshold / 0.3;
}

// Picks a grid/block/shared-memory configuration for a TAD-based (reduction)
// kernel launch. Returns dim3(num_blocks, num_threads, memory_limit), i.e.
// the z component is abused to carry the shared-memory byte count.
// `reduction` is a per-thread multiplier for extra shared memory needed by
// reduction buffers; 0 disables that accounting.
dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) {

    int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads);

    int countMP = deviceProperties[deviceId].multiProcessorCount;
    int regPerBlock = deviceProperties[deviceId].regsPerBlock;
    int warpSize = deviceProperties[deviceId].warpSize;

    int blockThreshold = getDeviceBlockThreshold(deviceId);
    int shmemThreshold = getDeviceSharedThreshold(deviceId);

    // round num_threads down to the nearest multiple of warpSize (min 1)
    num_threads -= num_threads % warpSize;

    num_threads = nd4j::math::nd4j_max<int>(1, num_threads);
    // for tiny TADs, just use one thread per TAD element
    if (num_threads < warpSize && tadLength < warpSize)
        num_threads = tadLength;

    // since we use shared memory as fast memory for some cases - we need to count that in
    int memory_limit = getBaseMemorySize(xRank, funcAttr);
    int memory_floor = memory_limit;
    int effective_block_limit = countMP * blockThreshold;

    int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit);

    // per-block shared-memory budget, assuming blocks spread evenly over SMs
    int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize);

    // at this moment we've stored all required information for things. time to count in reduction multipliers
    int reduction_per_block = 0;
    bool found = false;
    if (reduction > 0)
        // shrink the block (in steps of 32 threads) until the reduction
        // buffer fits inside the desired shared-memory budget, or we hit
        // minThreads — in which case we accept going over budget.
        while (!found) {
            reduction_per_block = (num_threads * elementSize * reduction);
            if (memory_limit + reduction_per_block < desiredShared) {
                memory_limit += reduction_per_block;
                found = true;
            } else {
                if (num_threads > minThreads) {
                    num_threads -= 32;
                } else {
                    memory_limit += reduction_per_block;
                    found = true;
                }
            }
        }

    // at this moment we know total memory used per block, and we also know per-mp limit.
    int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);

    // we don't want to spawn more blocks, that gpu can actually handle without queue
    //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
    num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);

//    if (num_blocks > countMP)
//        num_blocks = num_blocks - (num_blocks % countMP);

    num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);

    int targetBlocksPerMP = num_blocks / countMP;

    // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM
    // (cap of 2048 resident threads per SM; shrink the block in 32-thread
    // steps down to minThreads, then recompute the shared-memory footprint)
    if (targetBlocksPerMP * num_threads > 2048) {
        while (targetBlocksPerMP * num_threads > 2048) {
            if (num_threads <= minThreads)
                break;
            num_threads -= 32;
        }

        reduction_per_block = (num_threads * elementSize * reduction);
        memory_limit = memory_floor + reduction_per_block;
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);

    return dim3(num_blocks,num_threads, memory_limit);
}

/*
 * This method returns kernel launch params for linear (flat) memory access:
 * dim3(num_blocks, num_threads, shared_memory_bytes).
 */
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, hipFuncAttributes funcAttr) {
    auto xRank = shape::rank(dXShapeInfo);
    auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
    auto zRank = 0;

    int memory_limit = getBaseMemorySize(xRank, funcAttr);

    int countMP = deviceProperties[deviceId].multiProcessorCount;
    int regPerBlock = deviceProperties[deviceId].regsPerBlock;

    int blockThreshold = getDeviceBlockThreshold(deviceId);
    int shmemThreshold = getDeviceSharedThreshold(deviceId);

    auto xLength = shape::length(dXShapeInfo);
    int effective_block_limit = countMP * blockThreshold;

    // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here
    int num_threads = xLength / effective_block_limit;
    if (num_threads < minThreads)
        num_threads = minThreads;

    // NOTE(review): rounding down to a multiple of 32 here can yield 0 if
    // minThreads < 32, making the num_blocks division below divide by zero —
    // confirm minThreads is always >= 32.
    num_threads = num_threads - (num_threads % 32);

    int memory_floor = memory_limit;

    int num_blocks = xLength / num_threads;
    num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
//    num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
    num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);

    int targetBlocksPerMP = num_blocks / countMP;

    // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM
    if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
        while (targetBlocksPerMP * num_threads > 2048) {
            if (num_threads <= minThreads)
                break;
            num_threads -= 32;
        }
    }

    // if one pass still can't cover the whole buffer, double the grid
    if (xLength / num_threads > blockLimit)
        num_blocks *= 2;

    dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);

    return launchDims;
}

/**
 * This method returns kernel launch params for TAD-based memory access,
 * delegating the actual sizing to getBetterDimensions().
 *
 * @param deviceId        device ordinal used to read cached device properties
 * @param dXShapeInfo     shape info of the input buffer
 * @param tadShapeInfo    shape info of one TAD; nullptr means reduction along all dimensions
 * @param funcAttr        attributes of the kernel to be launched
 * @param dimensionLength number of dimensions being reduced
 * @param elementSize     size of one element in bytes
 * @param reductionSize   per-thread shared-memory multiplier for reduction buffers
 * @return dim3(num_blocks, num_threads, shared_memory_bytes)
 */
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {

    Nd4jLong tadLength = 0;
    Nd4jLong numTads = 0;
    if (tadShapeInfo != nullptr) {
        tadLength = shape::length(tadShapeInfo);
        numTads = shape::length(dXShapeInfo) / tadLength;

        if (tadLength == 1) {
            if (nd4j::Environment::getInstance()->isDebugAndVerbose())
                printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
        }
    } else{
        // we have special case - reduction along all dimensions
        tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
        numTads = shape::length(dXShapeInfo) / tadLength;
    }

    auto xRank = shape::rank(dXShapeInfo);
    int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo);

    dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
        printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
    }

    return launchDims;
}

/**
 * Returns optimal launch parameters given the host shape info pointer.
 * The length derived from the shape info drives getOptimalDimensions<T>().
 */
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, hipFuncAttributes attributes, hipDeviceProp_t properties) {
    auto n = shape::length(hXShapeInfo);

    dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));

    return launchDims;
}

// Allocates a scalar shape-info buffer and pushes it to the device on the
// given stream. Ownership of the returned Buffer passes to the caller.
// NOTE(review): shapeInfoLength(2) presumably matches the rank-2 layout
// produced by createScalarShapeInfo() — confirm against shape.h.
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
    Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
    nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
    nd4j::buffer::copyDataToGpu(&buff, stream);
    return buff;
}

// RAII holder for the host+device copies of a scalar's shape info and its
// "dimension" buffer (a single Nd4jLong).
class ScalarShapeInformation {
private:
    nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
    nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
//    std::thread::id threadId;

public:
    ScalarShapeInformation(hipStream_t stream) {
        auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));

        CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");
        scalarDimensionBuff[0] = MAX_DIMENSION;
        scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
        scalarShapeInfo = createScalarBuffer(stream);
//        threadId = std::this_thread::get_id();
    }

    ~ScalarShapeInformation() {
        nd4j::buffer::freeBuffer(&scalarShapeInfo);
        nd4j::buffer::freeBuffer(&scalarDimension);
    }

    // Host-side pointer to the scalar shape info
    Nd4jLong *getShapeInfoHostPointer() {
        return scalarShapeInfo->data;
    }

    // Device-side pointer to the scalar shape info
    Nd4jLong * getShapeInfoGpuPointer() {
        return scalarShapeInfo->gData;
    }

    // Host-side pointer to the single-element dimension buffer
    Nd4jLong * getDimensionHostPointer() {
        return scalarDimension->data;
    }

    // Device-side pointer to the single-element dimension buffer
    Nd4jLong * getDimensionGpuPointer() {
        return scalarDimension->gData;
    }

};

// Holds a single scalar value of type T mirrored between host and device,
// together with its scalar shape information. Used for scalar reductions.
template <typename T>
class ScalarInfo {
    nd4j::buffer::Buffer<T> *scalarData;
    ScalarShapeInformation *shapeInfo;
    T finalResult;
    hipStream_t streamRef;

public:
    ScalarInfo(hipStream_t stream) {
        T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));

        CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");

        shapeInfo = new ScalarShapeInformation(stream);
        scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
        streamRef = stream;
        nd4j::buffer::copyDataToGpu(&scalarData, stream);
    }

    // Copies the scalar back from the device (on the stored stream) and
    // returns it. NOTE(review): copyDataFromGpu presumably synchronizes
    // before the host read — confirm in buffer.h.
    T getFinalResultFromDevice() {
        nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
        return scalarData->data[0];
    }

    /**
     * Get the device shape information representing a scalar
     */
    Nd4jLong *getDeviceShapeInfo() {
        return shapeInfo->getShapeInfoGpuPointer();
    }

    /**
     * Get the dZ pointers
     */
    T *getDevicePointer() {
        return scalarData->gData;
    }

    /**
     * Get the infinite dimension device pointer
     */
    Nd4jLong *getDimensionDevicePointer() {
        return shapeInfo->getDimensionGpuPointer();
    }

    ~ScalarInfo() {
        nd4j::buffer::freeBuffer(&scalarData);
        delete shapeInfo;
    }
};

// Launches a pairwise transform (elementwise binary op) on the device.
// extraPointers[1] holds the hipStream_t used for the launch.
// X and Y must share a data type (unless Y is BOOL in experimental mode),
// and Z must match either X or Y.
void NativeOps::execPairwiseTransform(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *extraParams) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    dim3 launchDims(256, 1024, 8192);

    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);

    if (xType != zType && yType != zType)
        throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Pairwise transform producing a boolean result (e.g. comparisons).
// X and Y must share a type; Z must be BOOL.
void NativeOps::execPairwiseTransformBool(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *extraParams) {

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (!DataTypeUtils::isB(zType))
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);

    if (yType != xType)
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    dim3 launchDims(256, 1024, 16384);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}

////////////////////////////////////////////////////////////////////////
// Scalar summary-statistics reduction (mean/variance/stdev family).
// extraPointers[1] = stream, extraPointers[4] = reduction scratch buffer.
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
                                       int opNum,
                                       void *hX, Nd4jLong *hXShapeInfo,
                                       void *dX, Nd4jLong *dXShapeInfo,
                                       void *extraParams,
                                       void *hZ, Nd4jLong *hZShapeInfo,
                                       void *dZ, Nd4jLong *dZShapeInfo,
                                       bool biasCorrected) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    dim3 launchDims = dim3(256, 256, 32768);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}

// Broadcast op with boolean output along the given dimensions.
// TAD shape/offset buffers come in via extraPointers[9..13];
// X and Y must share a type, Z must be BOOL.
void NativeOps::execBroadcastBool(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
    auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
    auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

    if (!DataTypeUtils::isB(zType))
        throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");

    if (yType != xType)
        throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F3 opNum:[%i]\n", opNum);

    dim3 launchDims(256, 256, 16384);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)

    DEBUG_KERNEL(stream, opNum);
}

/**
 * Broadcast op (same-typed, or pairwise-typed in experimental mode) along
 * the given dimensions. TAD buffers arrive via extraPointers[9..13].
 *
 * @param opNum           broadcast op number
 * @param dX/dXShapeInfo  input array
 * @param dY/dYShapeInfo  broadcast operand
 * @param dZ/dZShapeInfo  output array
 * @param dimension       dimensions to broadcast along
 * @param dimensionLength number of dimensions
 */
void NativeOps::execBroadcast(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {
/*
    hipEvent_t start;
    hipEventCreateWithFlags(&start, hipEventDisableTiming);
    timespec tsX;
    timespec tsY;
    clock_gettime(CLOCK_REALTIME, &tsX);
*/
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
    auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
    auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F3 opNum:[%i]\n", opNum);

    dim3 launchDims(256, 256, 16384);

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

/**
 * Full (scalar) float reduction of X into Z.
 * Z must be a floating point type.
 */
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
                                int opNum,
                                void *hX, Nd4jLong *hXShapeInfo,
                                void *dX, Nd4jLong *dXShapeInfo,
                                void *extraParams,
                                void *hZ, Nd4jLong *hZShapeInfo,
                                void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("FF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if
 (!DataTypeUtils::isR(zType))
        throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}

// Full (scalar) same-typed reduction of X into Z (Z type must equal X type).
// extraPointers[1] = stream, [4] = reduction scratch, [9]/[10] = TAD shape info.
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("SF8 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != xType)
        throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}

// Same-typed reduction of X along the given dimensions into Z.
// One block per output element (numBlocks = length of Z).
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("SF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    auto xRank = shape::rank(hXShapeInfo);

    if (zType != xType)
        throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);

    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Long-typed reduction of X along the given dimensions into Z.
// Z must be INT64.
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("LF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

    auto xRank = shape::rank(hXShapeInfo);
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Full (scalar) long-typed reduction of X into Z. Z must be INT64.
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("LF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Boolean reduction of X along the given dimensions into Z. Z must be BOOL.
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("BF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::BOOL)
        throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");

    auto xRank = shape::rank(hXShapeInfo);
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Full (scalar) boolean reduction of X into Z. Z must be BOOL.
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("BF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::BOOL)
        throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}

/**
 * Index reduction (e.g. argmax) of X along the given dimensions into Z.
 * Z must be INT64 (it holds indices). One block per output element.
 * extraPointers: [1] stream, [3] allocation scratch, [4] reduction scratch,
 * [9..11] TAD shape info/offsets.
 *
 * @param opNum           index-reduce op number
 * @param dX/dXShapeInfo  input array
 * @param extraParams     op parameters
 * @param dZ/dZShapeInfo  output (index) array
 * @param dimension       dimensions to reduce along
 * @param dimensionLength number of dimensions
 */
void NativeOps::execIndexReduce(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F2 opNum:[%i]\n", opNum);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    // NOTE(review): other overloads pass (message, expected, actual) to
    // datatype_exception::build — here only zType is passed; confirm the
    // single-type overload is the intended one.
    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);

    auto dz = reinterpret_cast<Nd4jLong*>(dZ);

    BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}

/**
 * Float reduction of X along the given dimensions into Z.
 * One block per output element.
 */
void NativeOps::execReduceFloat(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F8 opNum:[%i]\n", opNum);

    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto xRank = shape::rank(hXShapeInfo);
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}

/**
 * Scalar index reduction (e.g. global argmax) of X, writing a single INT64
 * index into Z.
 *
 * @param opNum           index-reduce op number
 * @param dX/dXShapeInfo  input array
 * @param extraParams     op parameters
 * @param dZ/dZShapeInfo  scalar output (INT64)
 */
void NativeOps::execIndexReduceScalar(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo){

    if (nd4j::Environment::getInstance()->isDebug())
        printf("F1 opNum:[%i]\n", opNum);

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    // void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
        printf("AF1 opNum:[%i]\n", opNum);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // FIXME: we want Z to be one of integer types
    //if (!DataTypeUtils::isZ(zType))
    //    throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")

    // NOTE(review): the message below says "exeIndexReduceScalar" (missing
    // "c") — runtime string left untouched here; fix separately if desired.
    if (zType != nd4j::DataType::INT64)
        throw nd4j::datatype_exception::build("NativeOps::exeIndexReduceScalar requires Z operand to have INT64 data type", zType);

    auto dz = reinterpret_cast<Nd4jLong*>(dZ);

    BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}

// Elementwise transform where X and Z share the same data type.
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
                                  void *hX, Nd4jLong *hXShapeInfo,
                                  void *dX, Nd4jLong *dXShapeInfo,
                                  void *hZ, Nd4jLong *hZShapeInfo,
                                  void *dZ, Nd4jLong *dZShapeInfo,
                                  void *extraParams) {

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    dim3 launchDims(512, 512, 16384);

    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    if (xType != zType)
        throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");

    //nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
    BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}

// Elementwise transform producing a boolean result. Z must be BOOL.
// NOTE(review): unlike execTransformSame above, there is no
// DebugHelper::checkErrorCode after the launch here — confirm intended.
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
                                  void *hX, Nd4jLong *hXShapeInfo,
                                  void *dX, Nd4jLong *dXShapeInfo,
                                  void *hZ, Nd4jLong *hZShapeInfo,
                                  void *dZ, Nd4jLong *dZShapeInfo,
                                  void *extraParams) {

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    dim3 launchDims(512, 512, 16384);

    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    if (!DataTypeUtils::isB(zType))
        throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type");

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}

void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
                                 void *hX, Nd4jLong
*hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    switch (opNum) {
        case transform::IsMax: {
            // IsMax is handled specially: null extraParams means the whole
            // input is treated as a single vector ("scalar cheat" path).
            bool scalarCheat = false;
            if (extraParams == nullptr) {
                scalarCheat = true;
            }

            // extraPointers[17] is a device-side scratch buffer for the index result
            auto special = reinterpret_cast<double *>(extraPointers[17]);

            if (scalarCheat) {
                auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
                /**
                 * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
                 */
                execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
                Nd4jLong maxIdx = -119;
                checkCudaErrors(hipStreamSynchronize(*stream));
                // pull the winning index back to the host
                hipMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), hipMemcpyDeviceToHost, *stream);
                checkCudaErrors(hipStreamSynchronize(*stream));
                int targetIdx = 0;

                // NOTE(review): '&&' binds tighter than '||' here, so the stride
                // check only applies to the 'f'-order branch — presumably intended;
                // confirm against upstream before changing.
                if (shape::order(hXShapeInfo) == 'c' || shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo))
                    targetIdx = maxIdx;
                else
                    targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1];

                dim3 launchDims(1, 512, 1024);
                // set Z to 1 at targetIdx, 0 elsewhere
                BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES);
                nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) 
failed"); delete[] scalarShape; } else { auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]); auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]); auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); int dimensionLength = getDeviceId(extraPointers[18]); auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, dimensionLength); // we call for IMax on specified dimension execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, nullptr, cshape, dimension, nullptr); DEBUG_KERNEL(stream, opNum); dim3 launchDims(256, 256, 16384); // at this point, all IMax indexes are gathered, and we execute filler BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) 
failed"); delete[] cshape; } } break; default: { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES); } } } void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType || !DataTypeUtils::isR(xType)) throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType); switch (opNum) { case transform::SoftMax: case transform::SoftMaxDerivative: case transform::LogSoftMax: { if (shape::isVector(hXShapeInfo)) { int length = shape::length(hXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } else { auto shape = shape::shapeOf(hXShapeInfo); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops auto specialPointer = reinterpret_cast<double *>(extraPointers[6]); auto dimension = reinterpret_cast<int *>(specialPointer); auto maxDimension = dimension + 1; auto 
maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1);
                auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4));

                // Clone the extraPointers array so individual slots can be swapped
                // per sub-call without disturbing the caller's array.
                Nd4jPointer tempPointers[16];
                tempPointers[0] = extraPointers[0];
                tempPointers[1] = extraPointers[1];
                tempPointers[2] = extraPointers[2];
                tempPointers[3] = extraPointers[3];
                tempPointers[4] = extraPointers[4];
                tempPointers[5] = extraPointers[5];
                tempPointers[6] = extraPointers[6];
                tempPointers[7] = extraPointers[7];
                tempPointers[8] = extraPointers[8];
                tempPointers[9] = extraPointers[9];
                tempPointers[10] = extraPointers[10];
                tempPointers[11] = extraPointers[11];
                tempPointers[12] = extraPointers[12];
                tempPointers[13] = extraPointers[13];
                tempPointers[14] = extraPointers[14];
                tempPointers[15] = extraPointers[15];

                // Per-row reduction target shape: [rows, 1]
                Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1};
                auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape);
                auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, 1);

                tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer;
                tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer;

                // Build dimension + shape buffers directly on device
                hipLaunchKernelGGL(( prepareShapeBuffer), dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0], xType);

                DEBUG_KERNEL(stream, opNum);

                //shape::printShapeInfo(maxShapeBuffer);
                tempPointers[9] = extraPointers[12];
                tempPointers[10] = extraPointers[13];
                tempPointers[11] = extraPointers[14];

                // max 3
                execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, cshape, maxDimension, nullptr);

                DEBUG_KERNEL(stream, opNum);

                tempPointers[8] = extraPointers[8];
                tempPointers[9] = extraPointers[9];
                tempPointers[10] = extraPointers[10];
                tempPointers[11] = extraPointers[11];
                tempPointers[12] = extraPointers[10];
                tempPointers[13] = extraPointers[11];

                // sub 1
                execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, 
hZShapeInfo, dZ, dZShapeInfo, nullptr, cshape, dimension, nullptr);
                DEBUG_KERNEL(stream, opNum);

                // exp 3
                execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams);

                DEBUG_KERNEL(stream, opNum);

                tempPointers[8] = tempPointers[7];
                tempPointers[9] = extraPointers[12];
                tempPointers[10] = extraPointers[13];
                tempPointers[11] = extraPointers[14];

                //sum 1
                execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, cshape, maxDimension, nullptr);

                tempPointers[8] = extraPointers[8];
                tempPointers[9] = extraPointers[9];
                tempPointers[10] = extraPointers[10];
                tempPointers[11] = extraPointers[11];
                tempPointers[12] = extraPointers[10];
                tempPointers[13] = extraPointers[11];

                // divide 3
                execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, cshape, dimension, nullptr);

                DEBUG_KERNEL(stream, opNum);

                // log 3
                // Final elementwise pass depends on which softmax variant was requested
                if (opNum == transform::LogSoftMax)
                    execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);
                else if (opNum == transform::SoftMaxDerivative)
                    execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams);

                nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) 
failed"); delete hostMaxShapeBuffer; delete[] cshape; } } break; default: { BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } } } void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType); if (opNum == transform::Histogram) { dim3 launchDims(256, 256, 32768); Nd4jPointer maskedAllocPointer; auto length = shape::length(hZShapeInfo); hipMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64)); auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(maskedAllocPointer); } else { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); } 
} /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param dZ the dZ array * @param dZShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flatten(Nd4jPointer *extraPointers, int offset, char order, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hInput, Nd4jLong *hInputShapeInfo, void *dInput, Nd4jLong *dInputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F22 opNum:[7]\n"); // int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]); if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AF222 opNum:[7]\n"); auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo); BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } void NativeOps::checkP2P() { int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; hipSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void NativeOps::enableP2P(bool enable) { 
if (enable == allowedP2P) return; int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { hipDeviceEnablePeerAccess(dY, 0); } else { hipDeviceDisablePeerAccess(dY); } } else { if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } hipSetDevice(curDevice); } allowedP2P = enable; hipSetDevice(curDevice); } bool NativeOps::isP2PAvailable() { return supportedP2P; } void NativeOps::initializeDevicesAndFunctions() { int devCnt = 0; hipGetDeviceCount(&devCnt); deviceProperties = new hipDeviceProp_t[devCnt]; for (int i = 0; i < devCnt; i++) { hipSetDevice(i); hipGetDeviceProperties(&deviceProperties[i], i); hipDeviceSetLimit(hipLimitStackSize, 4096); } hipSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } void NativeOps::initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* this->hipblasSgemv = (CublasSgemv)functions[0]; this->hipblasDgemv = (CublasDgemv)functions[1]; this->hipblasHgemm = (CublasHgemm)functions[2]; this->hipblasSgemm = (CublasSgemm)functions[3]; this->hipblasDgemm = (CublasDgemm)functions[4]; this->cublasSgemmEx = (CublasSgemmEx)functions[5]; this->hipblasHgemmBatched = (CublasHgemmBatched)functions[6]; this->hipblasSgemmBatched = (CublasSgemmBatched)functions[7]; this->hipblasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ 
Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) {
    Nd4jPointer pointer;
    // hipHostMallocMapped |hipHostMallocPortable
    // returns 0 on failure rather than throwing
    hipError_t res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize, hipHostMallocDefault);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) {
    Nd4jPointer pointer;
    // note: ptrToDeviceId is currently unused — allocation lands on the active device
    hipError_t res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
int NativeOps::freeHost(Nd4jPointer pointer) {
    hipError_t res = hipHostFree(reinterpret_cast<void *>(pointer));
    // nulling the local copy has no caller-visible effect; return is always 1
    if (res != 0)
        pointer = 0L;
    return 1L;
}

/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param ptrToDeviceId pointer to deviceId.
 */
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
    hipError_t res = hipFree(reinterpret_cast<void *>(pointer));
    // nulling the local copy has no caller-visible effect; return is always 1
    if (res != 0)
        pointer = 0L;
    return 1L;
}

// Contexts are not modeled on this backend; always returns a null handle.
Nd4jPointer NativeOps::createContext() {
    return 0L;
}

// Allocates a hipStream_t on the heap, creates the stream, and returns the
// buffer as an opaque handle. Throws on stream-creation failure.
Nd4jPointer NativeOps::createStream() {
    Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(hipStream_t));
    CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream");
    hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("hipStreamCreate(...) 
failed"); return nativeStream; } Nd4jPointer NativeOps::createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer"); hipError_t dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventCreateWithFlags(...) failed"); return nativeEvent; } int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t dZ = hipEventRecord(*pEvent, *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventRecord(...) failed"); return 1; } int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) { int deviceId = getDeviceId(ptrToDeviceId); hipError_t dZ = hipSetDevice(deviceId); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipSetDevice(...) 
failed"); return 1; } Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memTotal; } int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { return memcpyAsync(dst, src, size, flags, reserved); } int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, 0); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; default: { printf("UNDEFINED MEMCPY!\n"); break; } } hipError_t dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); if (dZ != 0) { checkCudaErrors(dZ); printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); throw std::runtime_error("hipMemcpyAsync(...) 
failed"); //return 0L; } return 1; } int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { hipError_t dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipMemset(...) failed"); return 1; } int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipError_t dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipMemsetAsync(...) failed"); return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t dZ = hipEventDestroy(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEvenDestroy(...) failed"); return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t dZ = hipStreamSynchronize(*pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipStreamSynchronize(...) failed"); return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t dZ = hipEventSynchronize(*pEvent); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("hipEventSynchronize(...) 
failed"); return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hXShapeInfo = hZShapeInfo; auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo); // numArrays will be used as number of TADs, so each block process 1 input int smem = 8192; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hShapePointers[i])) { isScalar = 
false;
            break;
        }
    }

    // 2D row-major vstack: every input is a 'c'-ordered vector with positive EWS
    if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) {
        isVstack = true;
        for (int i = 0; i < numArrays; i++) {
            if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 ||
                shape::order(hShapePointers[i]) != 'c') {
                isVstack = false;
                break;
            }
        }
    }

    // let's try to fit N-dimensional vstack
    if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') {
        // all inputs must be 'c'-ordered, equally sized, with positive EWS
        auto length0 = shape::length(hShapePointers[0]);
        isVstack = true;
        for (int i = 0; i < numArrays; i++) {
            if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) {
                isVstack = false;
                break;
            }
        }
    }

    // hstack: concatenation of vectors along dimension 1
    if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) {
        isHstack = true;
        for (int i = 0; i < numArrays; i++) {
            if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) {
                isHstack = false;
                break;
            }
        }
    }

    if (isScalar) {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going scalar concat\n");

        dim3 launchDims(128, 128, 16384);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES);
    } else if (isVstack) {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going VStack concat\n");

        dim3 launchDims(128, 512, 16384);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else if (isHstack) {
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going HStack concat\n");

        dim3 launchDims(128, 128, 16384);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
 
BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES);
    } else {
        // Fallback path: fully generic TAD-based concatenation
        if (nd4j::Environment::getInstance()->isDebugAndVerbose())
            printf("Going generic concat\n");

        // device-side TAD shape/offsets for the output Z
        auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
        auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

        dim3 launchDims(128, 128, 8192);
        auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
        BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES);
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);

    // concat is synchronous from the caller's perspective
    hipError_t res = hipStreamSynchronize(*stream);
    checkCudaErrors(res);
    nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) 
failed"); } void NativeOps::specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { nd4j::SpecialMethods<float>::concatCpuGeneric( dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo); } /** * This method saves */ void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) { //nd4j_printf("START ------->\n",""); //nd4j_printf("Shape pointer: [%p]\n", dXShapeInfo); //nd4j_printf("Dimension pointer: [%p]\n", dimension); //nd4j_printf("shape rank: [%i]; dimLength: [%i]\n", shape::rank(dXShapeInfo), dimensionLength); //shape::printShapeInfoLinear(dXShapeInfo); //fflush(stdout); //shape::printArray<int>(reinterpret_cast<void*>(dimension), dimensionLength, "dimensions"); //fflush(stdout); //nd4j_printf("END ------->\n",""); shape::TAD tad; tad.init(dXShapeInfo, dimension, dimensionLength); //nd4j_printf("Creating TAD shape...\n",""); tad.createTadOnlyShapeInfo(); //nd4j_printf("Creating TAD offsets...\n",""); tad.createOffsets(); //nd4j_printf("memcpy TAD shape...\n",""); std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo)); //nd4j_printf("memcpy TAD offsets...\n",""); std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong)); //nd4j_printf("memcpy finished...\n",""); } int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; } //hipError_t dZ = hipMemcpyAsync((void *) 
dst, (const void *) src, (size_t) size, kind, *pStream);
    hipError_t dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("hipMemcpyToSymbolAsync(...) failed");

    return 1;
}

// Returns the device address of the constant-memory symbol used by
// memcpyConstantAsync.
Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);

    if (dZ != 0)
        throw std::runtime_error("hipGetSymbolAddress(...) failed");

    return dConstAddr;
}

// Gathers n rows (TADs) from dX into dZ according to 'indexes', using
// precomputed TAD shapes/offsets for both source and destination.
void NativeOps::pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims(64, 256, 1024);
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);

    DEBUG_KERNEL(stream, -1);
}

// Averages n arrays of 'length' elements into z. mode (extras[3]) selects the
// GPU path (0) or a host-side fallback.
void NativeOps::average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) {
    hipStream_t * stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("averageFloat called\n");

    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
        dim3 launchDims(256, 256, 4096);
        // averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate);
        BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, 
length, propagate), LIBND4J_TYPES);
        nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
    } else {
        // launching on host memory
        BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
    }
}

// Accumulates (sums) n arrays of 'length' elements into z. mode (extras[3])
// selects the GPU path (0) or a host-side fallback.
void NativeOps::accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) {
    auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("accumulateFloat called\n");
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);

    // launching on gpu
    if (mode == 0) {
        // one block per input array
        dim3 launchDims(n, 256, 16384);
        BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);
        nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) 
failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } void NativeOps::shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(N, 256, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); DEBUG_KERNEL(stream, 0); } /* void NativeOps::execMetaPredicateShape(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraA, void *extraB, double scalarA, double scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES); // functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, 
N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
*/

// True when this binary was built with experimental (mixed-dtype) ops enabled.
bool NativeOps::isExperimentalEnabled() {
    return nd4j::Environment::getInstance()->isExperimentalBuild();
}

// Clamps the requested minimum OMP thread count into [32, maxThreads].
void NativeOps::setOmpMinThreads(int threads) {
    minThreads = nd4j::math::nd4j_max<int>(32, threads);
    minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}

// Returns the currently selected device ordinal (-1 if the query fails).
int NativeOps::getDevice() {
    int curDevice = -1;
    hipGetDevice(&curDevice);
    return curDevice;
}

void NativeOps::setElementThreshold(int num) {
    // this is no-op for CUDA
}

void NativeOps::setTADThreshold(int num) {
    // this is no-op for CUDA
}

// Whole-array summary statistics (variance / stddev family).
// Z must hold a floating-point data type.
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims = dim3(256, 256, 32768);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    // NOTE(review): message says "execReduce3" but this is execSummaryStats — stale copy/paste text
    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}

// Dimension-wise summary statistics: one result per TAD along the given dimensions.
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength =
static_cast<int>(shape::length(hDimensionShape));
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
    dim3 launchDims = dim3(256, 256, 32768);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    // NOTE(review): message says "execReduce3" but this is execSummaryStats — stale copy/paste text
    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}

// Pairwise reduce3 (dot/distance family) over whole X and Y.
// X and Y must share a data type; Z must be floating point.
void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    dim3 launchDims(256, 256, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo,
nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)

    DEBUG_KERNEL(stream, opNum);
}

////////////////////////////////////////////////////////////////////////
// Dimension-wise reduce3: one result per TAD pair along the given dimensions.
void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // one block per output element
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}

////////////////////////////////////////////////////////////////////////
// Scalar-output reduce3 over the entire X and Y arrays.
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void
*dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}

// Applies a boolean scalar op (comparison family) elementwise; Z must be BOOL.
void NativeOps::execScalarBool( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims = dim3(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (xType != yType )
        throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");

    if
(!DataTypeUtils::isB(zType) )
        throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Along-dimension variant: applies one scalar per TAD; Z must be BOOL.
// NOTE(review): tadShapeInfo/tadOffsets/tadShapeInfoZ/tadOffsetsZ are accepted
// but nullptr is passed down to the launcher — confirm that is intentional.
void NativeOps::execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (xType != yType )
        throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);

    if (!DataTypeUtils::isB(zType) )
        throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);

    BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Applies an arithmetic scalar op elementwise over X into Z.
void NativeOps::execScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong
*dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // mixed dtypes are only legal in experimental builds (a BOOL scalar is always allowed)
    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);

    if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
        auto sX = DataTypeUtils::asString(xType);
        auto sY = DataTypeUtils::asString(yType);
        auto sZ = DataTypeUtils::asString(zType);
        nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
    }

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Along-dimension scalar op: one scalar per TAD.
void NativeOps::execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ,
Nd4jLong *tadOffsetsZ) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // mixed dtypes are only legal in experimental builds (a BOOL scalar is always allowed)
    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);

    dim3 launchDims(256, 256, 16384);

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Executes a single aggregate op; launch config is read from extraPointers[2..4]
// (blocks, threads, shared memory) as host-side integers.
void NativeOps::execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numBlocks, numThreads, shmem);

    BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum, arguments, numArguments,
shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}

// Executes a batch of aggregate ops packed into ptrToArguments; one block per aggregate.
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) {
    // not implemented yet
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numAggregates, numThreads, shmem);

    BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Fills Z with random values: copies RNG state to device, runs the op, then
// copies the advanced state back so the host generator stays in sync.
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) {
    auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto sizeOf = sizeof(nd4j::graph::RandomGenerator);
    Nd4jPointer stateDevice;
    // NOTE(review): the result of hipMalloc is stored but never checked; a failed
    // allocation would make the following memcpy fault.
    hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
    checkCudaErrors(hipStreamSynchronize(*stream));
    checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));

    dim3 launchDims = dim3(512, 512, 32768);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments),
    BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);
checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream)); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream)); dim3 launchDims = dim3(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo); // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments); BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES); checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream)); checkCudaErrors(hipStreamSynchronize(*stream)); hipFree(stateDevice); } void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto sizeOf = sizeof(nd4j::graph::RandomGenerator); Nd4jPointer stateDevice; hipError_t res = hipMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf); checkCudaErrors(hipStreamSynchronize(*stream)); 
checkCudaErrors(hipMemcpyAsync(stateDevice, stateHost, sizeOf, hipMemcpyHostToDevice, *stream));

    dim3 launchDims = dim3(512, 512, 32768);
    // note: the dispatch dtype is taken from Z (the output), not from X
    auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments);
    BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);

    checkCudaErrors(hipMemcpyAsync(stateHost, stateDevice, sizeOf, hipMemcpyDeviceToHost, *stream));
    checkCudaErrors(hipStreamSynchronize(*stream));
    hipFree(stateDevice);
}

// Creates a legacy RandomBuffer: host+device mirrors of a Xoroshiro128 sequence.
// The sequence is generated on the host, then copied to the device buffer.
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    // we don't synchronize at random initialization, it's safe to go unsync here
    // hipStreamSynchronize(*stream);

    auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
    auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
    buffer->propagateToDevice(buffer, *stream);

    nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");

    // we generate sequence in the host memory
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // and copy it to gpu
    hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
    nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");

    return buffer;
}

// Destroys a RandomBuffer created by initRandom.
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);

    // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
    hipDeviceSynchronize();

    delete buffer;
}

// Re-seeds an existing buffer, regenerates the host sequence, and re-uploads it.
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);

    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    hipStreamSynchronize(*stream);

    uint64_t *ptrDev = buffer->getDeviceBuffer();

    // update rng state
    buffer->setSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);

    // refresh buffer on host size
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // copy back to gpu
    hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}

// Re-seeds the RNG state only; the host-side sequence is not regenerated here.
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    hipStreamSynchronize(*stream);

    // update rng state
    buffer->reSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);
}

/**
 * Return the length of a shape buffer
 * based on the pointer
 * @param buffer the buffer pointer to check
 * @return
 */
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
    auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
    return shape::shapeInfoLength(shape::rank(shapeBuffer));
}

/**
 * The pointer to get the address for
 *
 * @param address the address to get the pointer
 * @return the pointer for the given address
 */
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address) {
    return
reinterpret_cast<Nd4jPointer >(address);
}

// Splits X into N target arrays along its TADs ("tear").
void NativeOps::tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    dim3 launchDims(512, 512, 512);
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}

// Recursive work-efficient prefix scan over int data: scans per-block partial
// sums into g_scanBlockSums[level], then recurses on those partials.
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
    auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);

    int blockSize = 512; // max size of the thread blocks
    int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
    int numThreads;

    if (numBlocks > 1)
        numThreads = blockSize;
    else if (nd4j::isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = nd4j::floorPow2(numElements);

    // each thread scans two elements
    int numEltsPerBlock = numThreads * 2;

    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
    int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
    int np2LastBlock = 0;
    int sharedMemLastBlock = 0;

    if (numEltsLastBlock != numEltsPerBlock) {
        np2LastBlock = 1;

        if(!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);

        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
    }

    // padding space is used to avoid shared memory bank conflicts
    int extraSpace = numEltsPerBlock / NUM_BANKS;
    int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);

    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    dim3 gridOnes(1, 1, 1);
    dim3 threadsOnes(numThreadsLastBlock, 1, 1);

    // enforce a minimum dynamic shared-memory size for the scan kernels
    if (sharedMemSize < 2048)
        sharedMemSize = 2048;

    if (sharedMemLastBlock < 2048)
        sharedMemLastBlock = 2048;

    // execute the scan
    if (numBlocks > 1) {
        nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
        if (np2LastBlock) {
            nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
        }

        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
        // get the final results.
// recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } } void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed"); } void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed"); } void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 
1 : 0);

    dim3 launchDims(numBlocks, blockSize, 4096);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed");
}

// Decodes a threshold-encoded buffer back into dense Z.
void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    // we probably want to have smaller blocks here, memory writes are misaligned anyway
    int blockSize = 128;
    int numBlocks = N / blockSize + (N % blockSize ? 1 : 0);

    dim3 launchDims(numBlocks, blockSize, 1024);
    auto zType = nd4j::ArrayOptions::dataType(zShapeInfo);
    BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed");
}

// All-pairs reduce3: every X TAD against every Y TAD along the given dimensions;
// one block per output element.
void NativeOps::execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("D119 opNum:[%i]\n", opNum);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);

    dim3 launchDims(shape::length(hZShapeInfo), 256, 32768);

    if
(nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled()) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[ 1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / 
numThreads;
        if (xLength % numThreads > 0 || numBlocks == 0)
            numBlocks++;

        numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks);
        dim3 launchDims(numBlocks, numThreads, 32768);

        // round xLength up past the next power of two to size the bitonic windows
        int max = 2, dg = 0;
        while (max < xLength) {
            max <<= 1;
            dg++;
        }
        max <<= 1;

        for (int window = 2; window < max; window<<=1) {
            int n = window;
            int rev = 0;
            do{
                int half = n >> 1;
                BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES);
                n>>=1;
                rev = 1;
            } while(n > 1);
        }
    }

    nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed");
}

// Sorts each TAD of X independently along the given dimensions (odd-even sort).
void NativeOps::sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) {
    // to be implemented
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    dim3 launchDims(512, 512, 32768);
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) failed");
}

void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
    throw std::runtime_error("sortCooIndices:: Not implemented yet");
}

// Bitmap-encodes elements above the threshold; returns the number of encoded
// elements (read from the host-visible result slot, which is then reset to 0).
Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
    int *resultPointer = reinterpret_cast<int *>(extraPointers[2]);
    int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]);

    dim3 launchDims(512, 512, 32768);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed");

    Nd4jLong dZ = (Nd4jLong) resultPointer[0];
    resultPointer[0] = 0;

    return dZ;
}

// Decodes a bitmap-encoded buffer back into dense Z.
void NativeOps::decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) {
    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    dim3 launchDims(512, 512, 16384);
    auto xType = nd4j::ArrayOptions::dataType(zShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) failed");
}

// Memory-mapped files are not supported on this backend.
Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
    return nullptr;
}

void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}

// Flat-graph execution is not implemented on this backend.
nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
    return nullptr;
}

const char* NativeOps::getAllCustomOps() {
    return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}

// Builds a temporary Context with the provided args/inputs and asks the op for
// its output shapes (buffer-aware variant with bool args).
nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) {
    nd4j::graph::VariableSpace varSpace;
    Context block(2, &varSpace);
    nd4j::ShapeList inShapes;

    for (int e = 0; e < numIArgs; e++)
        block.getIArguments()->push_back(iArgs[e]);

    for (int e = 0; e < numTArgs; e++)
        block.getTArguments()->push_back(tArgs[e]);

    for (int e = 0; e < numBArgs; e++)
        block.getBArguments()->push_back(bArgs[e]);

    for (int e = 0; e < numInputShapes; e++) {
        auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);

        // we shouldn't copy buffer if that's empty array
        void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ?
nullptr : inputBuffers[e]; auto array = new nd4j::NDArray(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, 
Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(0); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; inputs[e] = new nd4j::NDArray(buffer, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, shape); outputs[e] = array; // and we want to release shape copy once we're done array->triggerAllocationFlag(false, true); } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } /* if (!isInplace) { if (dZ->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape = (int *) outputShapes[e]; nd4j::NDArray<T> tmp(buffer, shape); if (tmp.lengthOf() != dZ->at(e)->lengthOf()) { nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf()); return ND4J_STATUS_BAD_OUTPUT; } tmp.assign(dZ->at(e)); } } else { // if op is inplace, our ResultSet holds pointers dZ->purge(); } delete dZ; */ for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int 
numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. 
values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void NativeOps::deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer); delete ptr; } void NativeOps::deleteVariablesSet(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void NativeOps::deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, 
Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, 
Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } void NativeOps::deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { 
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, 
float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) 
{ nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { 
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? 
blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void NativeOps::scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, int* hIindexes, int* dIndexes) { auto stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, 
dXOffsets, dY, dYShapeInfo, dYOffsets, dIndexes), LIBND4J_TYPES); }
0f686258379affa230e777d3a3dcc06a0a0ae904.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "../NativeOps.h" #include <cuda.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include "../Environment.h" #include <helpers/TAD.h> #include <ops/specials.h> #include <loops/reduce3.h> #include <loops/indexreduce.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <loops/broadcasting.h> #include <loops/broadcasting_bool.h> #include <loops/scalar.h> #include <loops/scalar_bool.h> #include <loops/pairwise_transform.h> #include <loops/pairwise_bool.h> #include <loops/transform_same.h> #include <loops/transform_float.h> #include <loops/transform_strict.h> #include <loops/transform_bool.h> #include <loops/transform_any.h> #include <loops/reduce_float.h> #include <loops/reduce_same.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> //#include <thread> #include <map> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/aggregates.h> #include <helpers/threshold.h> #include <ShapeList.h> #include <Context.h> #include <ops/specials_cuda.h> #include 
<graph/exceptions/datatype_exception.h> #include <helpers/CudaLaunchHelper.h> // FIXME: we need cuda-specific implementations #include <helpers/logger.h> #include <NDArray.h> #include <GraphExecutioner.h> #include <graph/GraphHolder.h> #include <graph/VariablesSet.h> #include <ops/declarable/OpRegistrator.h> #include <ops/declarable/CustomOperations.h> //#include <sys/time.h> #include <curand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; /** * This is utility kernel, that updates given special buffer with proper values in device memory */ extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, Nd4jLong *specialPointer, int rows, nd4j::DataType dataType) { Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid > 0) return; dimension[0] = 0; maxDimension[0] = 1; specialPointer[0] = 2; specialPointer[1] = rows; specialPointer[2] = 1; specialPointer[3] = 1; specialPointer[4] = 1; specialPointer[5] = 0; specialPointer[6] = 1; specialPointer[7] = 99; ArrayOptions::setDataType(specialPointer, dataType); //printf("special[0]: [%lld]\n", (long long) specialPointer[0]); //shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer); } // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){ SyncInfo *sync = reinterpret_cast<SyncInfo *>(data); //printf("Finished stream: [%i], kernel call: 
[%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jLong n,cudaFuncAttributes attributes, cudaDeviceProp properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, 
shmemThreshold, desiredShared, elementSize);

    // at this moment we've stored all required information for things. time to count in reduction multipliers
    int reduction_per_block = 0;
    bool found = false;
    if (reduction > 0)
        while (!found) {
            reduction_per_block = (num_threads * elementSize * reduction);

            if (memory_limit + reduction_per_block < desiredShared) {
                memory_limit += reduction_per_block;
                found = true;
            } else {
                if (num_threads > minThreads) {
                    // shrink the block one warp at a time until the buffer fits
                    num_threads -= 32;
                } else {
                    // can't shrink any further — accept the overshoot
                    memory_limit += reduction_per_block;
                    found = true;
                }
            }
        }

    // at this moment we know total memory used per block, and we also know per-mp limit.
    int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block);

    // we don't want to spawn more blocks, that gpu can actually handle without queue
    //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks);
    num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);

    // if (num_blocks > countMP)
    //    num_blocks = num_blocks - (num_blocks % countMP);

    num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);

    int targetBlocksPerMP = num_blocks / countMP;

    // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM
    if (targetBlocksPerMP * num_threads > 2048) {
        while (targetBlocksPerMP * num_threads > 2048) {
            if (num_threads <= minThreads)
                break;
            num_threads -= 32;
        }

        // thread count changed, so recompute the shared-memory reduction buffer
        reduction_per_block = (num_threads * elementSize * reduction);
        memory_limit = memory_floor + reduction_per_block;
    }

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP);

    return dim3(num_blocks,num_threads, memory_limit);
}

/*
 * This method returns kernel launch param for linear memory access
 */
dim3 getFlatLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *dYShapeInfo, cudaFuncAttributes funcAttr) {
    auto xRank = shape::rank(dXShapeInfo);
    auto yRank = dYShapeInfo == nullptr ? 0 : shape::rank(dYShapeInfo);
    auto zRank = 0;

    int memory_limit = getBaseMemorySize(xRank, funcAttr);

    int countMP = deviceProperties[deviceId].multiProcessorCount;
    int regPerBlock = deviceProperties[deviceId].regsPerBlock;

    int blockThreshold = getDeviceBlockThreshold(deviceId);
    int shmemThreshold = getDeviceSharedThreshold(deviceId);

    auto xLength = shape::length(dXShapeInfo);
    int effective_block_limit = countMP * blockThreshold;

    // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here
    int num_threads = xLength / effective_block_limit;
    if (num_threads < minThreads)
        num_threads = minThreads;

    // round down to a warp multiple.
    // NOTE(review): this runs AFTER the minThreads clamp — if minThreads were
    // ever < 32 this could reach 0 and divide by zero below; confirm minThreads >= 32.
    num_threads = num_threads - (num_threads % 32);

    int memory_floor = memory_limit;

    int num_blocks = xLength / num_threads;
    num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit);
    // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit);
    num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1);

    int targetBlocksPerMP = num_blocks / countMP;

    // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM
    if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) {
        while (targetBlocksPerMP * num_threads > 2048) {
            if (num_threads <= minThreads)
                break;
            num_threads -= 32;
        }
    }

    // if the capped grid can't cover the input in one pass, double it
    if (xLength / num_threads > blockLimit)
        num_blocks *= 2;

    dim3 launchDims = dim3(num_blocks, num_threads, memory_limit);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit);

    return launchDims;
}

/**
 * This method returns kernel launch params with TAD-based memory access
 *
 * @param deviceId device ordinal used to look up hardware properties
 * @param dXShapeInfo shape info of the full input array
 * @param tadShapeInfo shape info of one TAD; nullptr means reduction along all dimensions
 * @param funcAttr attributes of the kernel being launched
 * @param dimensionLength number of dimensions being reduced
 * @param elementSize size in bytes of a single element
 * @param reductionSize per-thread multiplier for the shared-memory reduction buffer
 * @return dim3 packing gridSize/blockSize/shmemBytes into x/y/z
 */
dim3 getReduceLaunchParams(int deviceId, Nd4jLong *dXShapeInfo, Nd4jLong *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) {

    Nd4jLong tadLength = 0;
    Nd4jLong numTads = 0;
    if (tadShapeInfo != nullptr) {
        tadLength = shape::length(tadShapeInfo);
        numTads = shape::length(dXShapeInfo) / tadLength;

        if (tadLength == 1) {
            if (nd4j::Environment::getInstance()->isDebugAndVerbose())
                printf("A xLength: [%i], zLength: [%i]\n", shape::length(dXShapeInfo), shape::length(tadShapeInfo));
        }
    } else{
        // we have special case - reduction along all dimensions
        tadLength = nd4j::math::nd4j_min<int>(shape::length(dXShapeInfo), 768);
        numTads = shape::length(dXShapeInfo) / tadLength;
    }

    auto xRank = shape::rank(dXShapeInfo);
    int zRank = tadShapeInfo == nullptr ?
0 : shape::rank(tadShapeInfo);

    dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { //|| launchDims.dX == 1
        printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.dX: [%i], launchDims.dY: [%i], launchDims.dZ: [%i]\n", shape::length(dXShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z);
    }

    return launchDims;
}

/**
 * Returns optimal launch parameters
 * given the extra pointers passed in.
 * The extra pointer should be
 * the host pointer for the shape information
 * associated with the data.
 * From there it is used to obtain the length
 * from which we can derive the optimal launch parameters.
 *
 */
template <typename T>
dim3 getOptimalLaunchParameters(const Nd4jLong *hXShapeInfo, cudaFuncAttributes attributes, cudaDeviceProp properties) {
    auto n = shape::length(hXShapeInfo);

    dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));

    return launchDims;
}

// Allocates a host buffer holding a scalar shape-info descriptor and mirrors it
// onto the device via the given stream.
nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
    Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo();
    nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
    nd4j::buffer::copyDataToGpu(&buff, stream);
    return buff;
}

// Owns host+device copies of a scalar shape-info descriptor and of the
// MAX_DIMENSION "dimension" marker; both buffers are freed on destruction.
class ScalarShapeInformation {
private:
    nd4j::buffer::Buffer<Nd4jLong> *scalarDimension;
    nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
//    std::thread::id threadId;

public:
    ScalarShapeInformation(cudaStream_t stream) {
        auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));

        CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer");

        scalarDimensionBuff[0] = MAX_DIMENSION;
        scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
        scalarShapeInfo = createScalarBuffer(stream);
//        threadId = std::this_thread::get_id();
    }
    ~ScalarShapeInformation() {
        nd4j::buffer::freeBuffer(&scalarShapeInfo);
        nd4j::buffer::freeBuffer(&scalarDimension);
    }

    Nd4jLong *getShapeInfoHostPointer() {
        return scalarShapeInfo->data;
    }

    Nd4jLong * getShapeInfoGpuPointer() {
        return scalarShapeInfo->gData;
    }

    Nd4jLong * getDimensionHostPointer() {
        return scalarDimension->data;
    }

    Nd4jLong * getDimensionGpuPointer() {
        return scalarDimension->gData;
    }

};

// Holds a single scalar value on host+device together with its shape info;
// used to receive scalar results from device-side reductions.
template <typename T>
class ScalarInfo {
    nd4j::buffer::Buffer<T> *scalarData;
    ScalarShapeInformation *shapeInfo;
    T finalResult;
    cudaStream_t streamRef;
public:
    ScalarInfo(cudaStream_t stream) {
        T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));

        CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer");

        shapeInfo = new ScalarShapeInformation(stream);
        scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
        streamRef = stream;
        nd4j::buffer::copyDataToGpu(&scalarData, stream);
    }

    // Copies the scalar back from the device (on the creation stream) and returns it.
    T getFinalResultFromDevice() {
        nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
        return scalarData->data[0];
    }

    /**
     * Get the device shape information
     * representing a scalar
     */
    Nd4jLong *getDeviceShapeInfo() {
        return shapeInfo->getShapeInfoGpuPointer();
    }

    /**
     * Get the dZ pointers
     */
    T *getDevicePointer() {
        return scalarData->gData;
    }

    /**
     * Get the infinite dimension device pointer
     */
    Nd4jLong *getDimensionDevicePointer() {
        return shapeInfo->getDimensionGpuPointer();
    }

    ~ScalarInfo() {
        nd4j::buffer::freeBuffer(&scalarData);
        delete shapeInfo;
    }
};

// Dispatches a pairwise (element-by-element, Z = X op Y) transform kernel.
// The CUDA stream is carried in extraPointers[1].
void NativeOps::execPairwiseTransform(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *extraParams) {

    auto stream =
reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    dim3 launchDims(256, 1024, 8192);

    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransform both operands must have same data type", xType, yType);

    if (xType != zType && yType != zType)
        throw std::runtime_error("NativeOps::execPairwiseTransform requires Z operand to have either X or Y type");

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES, LIBND4J_TYPES)
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::pairwise_transforms::PairWiseTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dY, dYShapeInfo, hYShapeInfo, dZ, dZShapeInfo, hZShapeInfo, extraParams), LIBND4J_TYPES)
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Pairwise transform producing a BOOL result (comparisons etc.); X and Y must
// share a data type and Z must be BOOL.
void NativeOps::execPairwiseTransformBool(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *extraParams) {

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (!DataTypeUtils::isB(zType))
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool wrong Z operand data type", nd4j::DataType::BOOL, zType);

    if (yType != xType)
        throw nd4j::datatype_exception::build("NativeOps::execPairwiseTransformBool both operands must have same data type", xType, yType);

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(256, 1024, 16384);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::pairwise_transforms::PairWiseBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams), LIBND4J_TYPES, BOOL_TYPES)
}

////////////////////////////////////////////////////////////////////////
// Summary-statistics reduction of X into a scalar Z; the device-side reduction
// scratch buffer is carried in extraPointers[4].
void NativeOps::execSummaryStatsScalar(Nd4jPointer *extraPointers,
                                       int opNum,
                                       void *hX, Nd4jLong *hXShapeInfo,
                                       void *dX, Nd4jLong *dXShapeInfo,
                                       void *extraParams,
                                       void *hZ, Nd4jLong *hZShapeInfo,
                                       void *dZ, Nd4jLong *dZShapeInfo,
                                       bool biasCorrected) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    dim3 launchDims = dim3(256, 256, 32768);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}

// Broadcast op with BOOL result along the given dimension(s); TAD shape/offset
// buffers for X and Z are carried in extraPointers[9..13].
void NativeOps::execBroadcastBool(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo =
reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
    auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
    auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

    if (!DataTypeUtils::isB(zType))
        throw std::runtime_error("NativeOps::execBroadcastBool requires Z operand to have BOOL type");

    if (yType != xType)
        throw std::runtime_error("NativeOps::execBroadcastBool requires both X & Y operands to have same type");

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F3 opNum:[%i]\n", opNum);

    dim3 launchDims(256, 256, 16384);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::broadcast::BroadcastBool, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, BOOL_TYPES)

    DEBUG_KERNEL(stream, opNum);
}

/**
 * Broadcast op along the given dimension(s): Z = X op Y, with Y broadcast over
 * X's TADs. TAD shape/offset buffers are carried in extraPointers[9..13].
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param dY
 * @param dYShapeInfo
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
void NativeOps::execBroadcast(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hY, Nd4jLong *hYShapeInfo,
        void *dY, Nd4jLong *dYShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {
/*
    cudaEvent_t start;
    cudaEventCreateWithFlags(&start, cudaEventDisableTiming);
    timespec tsX;
    timespec tsY;
    clock_gettime(CLOCK_REALTIME, &tsX);
*/
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
    auto dTADShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
    auto dTADOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F3 opNum:[%i]\n", opNum);

    dim3 launchDims(256, 256, 16384);

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::broadcast::Broadcast, ::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, dTADShapeInfo, dTADOffsets, dTADShapeInfoZ, dTADOffsetsZ), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

/**
 * Full reduction of X into a scalar Z using a float-typed reduce op.
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
void NativeOps::execReduceFloat(Nd4jPointer *extraPointers,
                                int opNum,
                                void *hX, Nd4jLong *hXShapeInfo,
                                void *dX, Nd4jLong *dXShapeInfo,
                                void *extraParams,
                                void *hZ, Nd4jLong *hZShapeInfo,
                                void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("FF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if
(!DataTypeUtils::isR(zType))
        throw std::runtime_error("NativeOps::execReduceFloat requires Z operand to have floating point type");

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, FLOAT_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceFloat(...) failed");
}

// Full reduction of X into a scalar Z; the op keeps X's data type.
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("SF8 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != xType)
        throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}

// Same-type reduction along the given dimension(s); one output element per TAD.
// TAD shape/offset buffers are carried in extraPointers[9..11].
void NativeOps::execReduceSame(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("SF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto xRank = shape::rank(hXShapeInfo);

    if (zType != xType)
        throw datatype_exception::build("NativeOps::execReduceSame requires both X & Z operands to have same type", xType, zType);

    // one block per output element (i.e. per TAD)
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceSame(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Reduction along the given dimension(s) producing an INT64 result.
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("LF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

    auto xRank = shape::rank(hXShapeInfo);
    // one block per output element (i.e. per TAD)
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, LONG_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Full reduction of X into an INT64 scalar.
void NativeOps::execReduceLong(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("LF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execReduceLong wrong Z data type", nd4j::DataType::INT64, zType);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Reduction along the given dimension(s) producing a BOOL result.
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo,
                               void *hDimension, Nd4jLong *hDimensionShape,
                               void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("BF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::BOOL)
        throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");

    auto xRank = shape::rank(hXShapeInfo);
    // one block per output element (i.e. per TAD)
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, BOOL_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}

////////////////////////////////////////////////////////////////////////
// Full reduction of X into a BOOL scalar.
void NativeOps::execReduceBool(Nd4jPointer *extraPointers,
                               int opNum,
                               void *hX, Nd4jLong *hXShapeInfo,
                               void *dX, Nd4jLong *dXShapeInfo,
                               void *extraParams,
                               void *hZ, Nd4jLong *hZShapeInfo,
                               void *dZ, Nd4jLong *dZShapeInfo) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("BF7 opNum:[%i]\n", opNum);

    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (zType != nd4j::DataType::BOOL)
        throw std::runtime_error("NativeOps::execReduceBool requires Z operand to have BOOL type");

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
}

/**
 * Index reduction (argmax/argmin family) along the given dimension(s);
 * Z receives INT64 indices, one per TAD.
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 * @param dimension
 * @param dimensionLength
 */
void NativeOps::execIndexReduce(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    Nd4jLong *hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    Nd4jLong *dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    Nd4jLong *dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F2 opNum:[%i]\n", opNum);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    if (zType != nd4j::DataType::INT64)
        throw datatype_exception::build("NativeOps::execIndexReduce requires Z operand to have INT64 type", zType);

    auto dz = reinterpret_cast<Nd4jLong*>(dZ);

    BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES);
}

/**
 * Float-typed reduction along the given dimension(s); one output per TAD.
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 * @param dZ
 * @param dZShapeInfo
 */
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hDimension, Nd4jLong *hDimensionShape,
        void *dDimension, Nd4jLong *dDimensionShape) {

    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
    auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
    auto dTADOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F8 opNum:[%i]\n", opNum);

    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto xRank = shape::rank(hXShapeInfo);
    // one block per output element (i.e. per TAD)
    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction, ::execReduceXD(launchDims, stream, opNum, xRank, dX,dXShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, reductionPointer, dTADShapeInfo, dTADOffsets), LIBND4J_TYPES, FLOAT_TYPES);
}

/**
 * Scalar index reduction (argmax/argmin over the whole array); Z receives a
 * single INT64 index.
 *
 * @param opNum
 * @param dX
 * @param dXShapeInfo
 * @param extraParams
 */
void NativeOps::execIndexReduceScalar(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *extraParams,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo){

    if (nd4j::Environment::getInstance()->isDebug())
        printf("F1 opNum:[%i]\n", opNum);

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    // void *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    void *reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose() && launchDims.x == 1)
        printf("AF1 opNum:[%i]\n", opNum);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // FIXME: we want Z to be one of integer types
    //if (!DataTypeUtils::isZ(zType))
    //    throw nd4j::datatype_exception("NativeOps::execIndexReduceScalar requires Z operand to have one of integer types")

    // NOTE(review): the message below says "exeIndexReduceScalar" — typo for
    // "execIndexReduceScalar"; left unchanged here since it is a runtime string.
    if (zType != nd4j::DataType::INT64)
        throw nd4j::datatype_exception::build("NativeOps::exeIndexReduceScalar requires Z operand to have INT64 data type", zType);

    auto dz = reinterpret_cast<Nd4jLong*>(dZ);

    BUILD_SINGLE_SELECTOR(xType, functions::indexreduce::IndexReduce, ::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz, nullptr, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execIndexReduceScalar(...) failed");
}

// Element-wise transform where Z keeps X's data type.
void NativeOps::execTransformSame(Nd4jPointer *extraPointers,int opNum,
                                  void *hX, Nd4jLong *hXShapeInfo,
                                  void *dX, Nd4jLong *dXShapeInfo,
                                  void *hZ, Nd4jLong *hZShapeInfo,
                                  void *dZ, Nd4jLong *dZShapeInfo,
                                  void *extraParams) {

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(512, 512, 16384);

    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    if (xType != zType)
        throw std::runtime_error("NativeOps::execTransformSame requires X & Z to have same type");

    //nd4j_printf("Going to execute transformSame; opNum: %i\n", opNum);
    BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execTransformSame(...) failed");
}

// Element-wise transform producing a BOOL result.
void NativeOps::execTransformBool(Nd4jPointer *extraPointers,int opNum,
                                  void *hX, Nd4jLong *hXShapeInfo,
                                  void *dX, Nd4jLong *dXShapeInfo,
                                  void *hZ, Nd4jLong *hZShapeInfo,
                                  void *dZ, Nd4jLong *dZShapeInfo,
                                  void *extraParams) {

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(512, 512, 16384);

    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    if (!DataTypeUtils::isB(zType))
        throw std::runtime_error("NativeOps::execTransformBool requires Z to have same boolean type");

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);
}

// Element-wise transform where Z's type may differ from X's; the IsMax op
// receives special handling below.
void NativeOps::execTransformAny(Nd4jPointer *extraPointers,int opNum,
                                 void *hX, Nd4jLong *hXShapeInfo,
                                 void *dX, Nd4jLong *dXShapeInfo,
                                 void *hZ, Nd4jLong *hZShapeInfo,
                                 void *dZ, Nd4jLong *dZShapeInfo,
                                 void *extraParams) {

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto xRank = shape::rank(hXShapeInfo);
    auto zRank = shape::rank(hZShapeInfo);
    auto xType = ArrayOptions::dataType(hXShapeInfo);
    auto zType = ArrayOptions::dataType(hZShapeInfo);

    switch (opNum) {
        case transform::IsMax: {
            // extraParams == nullptr signals the vector-input shortcut below
            bool scalarCheat = false;
            if (extraParams == nullptr) {
                scalarCheat = true;
            }

            auto special = reinterpret_cast<double *>(extraPointers[17]);

            if (scalarCheat) {
                auto scalarShape = ShapeBuilders::createScalarShapeInfo(nd4j::DataType::INT64);
                /**
                 * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call
                 */
                execIndexReduceScalar(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, scalarShape, special, nullptr);
                Nd4jLong maxIdx = -119;
                checkCudaErrors(cudaStreamSynchronize(*stream));
cudaMemcpyAsync(&maxIdx, special, sizeof(Nd4jLong), cudaMemcpyDeviceToHost, *stream); checkCudaErrors(cudaStreamSynchronize(*stream)); int targetIdx = 0; if (shape::order(hXShapeInfo) == 'c' || shape::order(hXShapeInfo) == 'f' && maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1] >= shape::length(hXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hXShapeInfo)[shape::rank(hXShapeInfo) - 1]; dim3 launchDims(1, 512, 1024); BUILD_SINGLE_SELECTOR(zType, fillIsMaxGeneric, (launchDims, stream, dZ, shape::length(hZShapeInfo), targetIdx), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) failed"); delete[] scalarShape; } else { auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostTShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[19]); auto tadMaxShapeInfo = reinterpret_cast<Nd4jLong *> (extraPointers[10]); auto tadMaxOffsets = reinterpret_cast<Nd4jLong *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); int dimensionLength = getDeviceId(extraPointers[18]); auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, dimensionLength); // we call for IMax on specified dimension execIndexReduce(extraPointers, indexreduce::IndexMax, nullptr, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostTShapeInfo, special, hostYShapeInfo, nullptr, cshape, dimension, nullptr); DEBUG_KERNEL(stream, opNum); dim3 launchDims(256, 256, 16384); // at this point, all IMax indexes are gathered, and we execute filler BUILD_SINGLE_SELECTOR(zType, fillDimensionalIsMaxGeneric, (launchDims, stream, special, dZ, dZShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "Legacy IsMax(...) 
failed"); delete[] cshape; } } break; default: { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, LIBND4J_TYPES); } } } void NativeOps::execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (xType != zType || !DataTypeUtils::isR(xType)) throw datatype_exception::build("NativeOps::execTransformStrict requires X & Z to have same floating point type", xType, zType); switch (opNum) { case transform::SoftMax: case transform::SoftMaxDerivative: case transform::LogSoftMax: { if (shape::isVector(hXShapeInfo)) { int length = shape::length(hXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } else { auto shape = shape::shapeOf(hXShapeInfo); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops auto specialPointer = reinterpret_cast<double *>(extraPointers[6]); auto dimension = reinterpret_cast<int *>(specialPointer); auto maxDimension = dimension + 1; auto 
maxShapeBuffer = reinterpret_cast<Nd4jLong *>(maxDimension + 1); auto special = reinterpret_cast<double *> (maxShapeBuffer + (MAX_RANK * 2 + 4)); Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; Nd4jLong maxShape[2] = {shape::shapeOf(hXShapeInfo)[0], 1}; auto hostMaxShapeBuffer = shape::shapeBuffer(2, xType, maxShape); auto cshape = ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT32, 1); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; prepareShapeBuffer<<<1, 1, 128, *stream>>>(dimension, maxDimension, maxShapeBuffer, shape[0], xType); DEBUG_KERNEL(stream, opNum); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceSame(tempPointers, reduce::Max, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, cshape, maxDimension, nullptr); DEBUG_KERNEL(stream, opNum); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcast(tempPointers, broadcast::Subtract, hX, hXShapeInfo, dX, dXShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, 
cshape, dimension, nullptr); DEBUG_KERNEL(stream, opNum); // exp 3 execTransformStrict(extraPointers, transform::Exp, hZ, hZShapeInfo, dZ, dZShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); DEBUG_KERNEL(stream, opNum); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceSame(tempPointers, reduce::Sum, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, cshape, maxDimension, nullptr); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcast(tempPointers, broadcast::Divide, hZ, hZShapeInfo, dZ, dZShapeInfo, nullptr, hostMaxShapeBuffer, special, maxShapeBuffer, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, cshape, dimension, nullptr); DEBUG_KERNEL(stream, opNum); // log 3 if (opNum == transform::LogSoftMax) execTransformStrict(extraPointers, transform::Log, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); else if (opNum == transform::SoftMaxDerivative) execTransformStrict(extraPointers, transform::SpecialDerivative, nullptr, hZShapeInfo, dZ, dZShapeInfo, nullptr, hZShapeInfo, dZ, dZShapeInfo, extraParams); nd4j::DebugHelper::checkErrorCode(stream, "SoftMax(...) 
failed"); delete hostMaxShapeBuffer; delete[] cshape; } } break; default: { BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), FLOAT_TYPES); } } } void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xRank = shape::rank(hXShapeInfo); auto zRank = shape::rank(hZShapeInfo); auto xType = ArrayOptions::dataType(hXShapeInfo); auto zType = ArrayOptions::dataType(hZShapeInfo); if (!DataTypeUtils::isR(zType)) throw datatype_exception::build("NativeOps::execTransformFloat requires Z to have floating point type", zType); if (opNum == transform::Histogram) { dim3 launchDims(256, 256, 32768); Nd4jPointer maskedAllocPointer; auto length = shape::length(hZShapeInfo); cudaMalloc(reinterpret_cast<void **>(&maskedAllocPointer), length * launchDims.x * DataTypeUtils::sizeOf(nd4j::DataType::INT64)); auto imaskedAllocPointer = reinterpret_cast<int *>(maskedAllocPointer); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, imaskedAllocPointer, reductionPointer, nullptr, nullptr), LIBND4J_TYPES, FLOAT_TYPES); checkCudaErrors(cudaStreamSynchronize(*stream)); cudaFree(maskedAllocPointer); } else { dim3 launchDims(512, 512, 16384); BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat, ::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ, dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, 
FLOAT_TYPES); } }

/**
 * Append an input array
 * to the end of a flat array
 * in a particular order
 * @param offset the offset of the array to start at
 * @param order the order
 * @param dZ the dZ array
 * @param dZShapeInfo the shape info for te array
 * @param input the input for the array
 * @param inputShapeInfo the shape information for that array
 */
void NativeOps::flatten(Nd4jPointer *extraPointers,
                        int offset,
                        char order,
                        void *hZ, Nd4jLong *hZShapeInfo,
                        void *dZ, Nd4jLong *dZShapeInfo,
                        void *hInput, Nd4jLong *hInputShapeInfo,
                        void *dInput, Nd4jLong *dInputShapeInfo) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto hYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("F22 opNum:[7]\n");

    // int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hYShapeInfo), 2, funcAttributes[30]);

    if (nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1)
        printf("AF222 opNum:[7]\n");

    auto type = nd4j::ArrayOptions::dataType(hInputShapeInfo);
    BUILD_SINGLE_SELECTOR(type, flattenKernelGeneric, (launchDims, stream, extraPointers, offset, order, dZ, dZShapeInfo, dInput, dInputShapeInfo), LIBND4J_TYPES);

    DEBUG_KERNEL(stream, -1);
}

/**
 * Probes every device pair for peer-to-peer accessibility and records the
 * result in the global supportedP2P flag. Restores the caller's device.
 */
void NativeOps::checkP2P() {
    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // FIX: was `curDevice < 0 && curDevice > devCnt`, which can never be true;
    // valid device ids are [0, devCnt), so clamp when the id is out of range.
    if (curDevice < 0 || curDevice >= devCnt)
        curDevice = 0;

    bool tempSupport = true;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);
                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (!canAccess) {
                    tempSupport = false;
                    break;
                }
            }
        }

        supportedP2P = tempSupport;

        cudaSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

void
NativeOps::enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // FIX: was `curDevice < 0 && curDevice > devCnt`, which can never be true;
    // valid device ids are [0, devCnt), so clamp when the id is out of range.
    if (curDevice < 0 || curDevice >= devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int dX = 0; dX < devCnt; dX++) {
            for (int dY = 0; dY < devCnt; dY++) {
                if (dX == dY)
                    continue;

                int canAccess = 0;
                cudaSetDevice(dX);
                cudaDeviceCanAccessPeer(&canAccess, dX , dY);

                if (canAccess) {
                    if (enable) {
                        cudaDeviceEnablePeerAccess(dY, 0);
                    } else {
                        cudaDeviceDisablePeerAccess(dY);
                    }
                } else {
                    if (nd4j::Environment::getInstance()->isVerbose())
                        printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
                }
            }
        }
    }

    allowedP2P = enable;

    // single restore of the caller's device (the loop above switches devices);
    // the original restored twice back-to-back, which was redundant
    cudaSetDevice(curDevice);
}

bool NativeOps::isP2PAvailable() {
    return supportedP2P;
}

/**
 * Queries all devices, caches their properties, caps the per-thread stack,
 * then probes and (optionally) enables peer-to-peer access.
 */
void NativeOps::initializeDevicesAndFunctions() {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    deviceProperties = new cudaDeviceProp[devCnt];
    for (int i = 0; i < devCnt; i++) {
        cudaSetDevice(i);
        cudaGetDeviceProperties(&deviceProperties[i], i);

        cudaDeviceSetLimit(cudaLimitStackSize, 4096);
    }

    cudaSetDevice(0);

    checkP2P();

    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1)
        enableP2P(allowedP2P);
}

void NativeOps::initializeFunctions(Nd4jPointer *functions) {
    nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
    /*
    this->cublasSgemv = (CublasSgemv)functions[0];
    this->cublasDgemv = (CublasDgemv)functions[1];
    this->cublasHgemm = (CublasHgemm)functions[2];
    this->cublasSgemm = (CublasSgemm)functions[3];
    this->cublasDgemm = (CublasDgemm)functions[4];
    this->cublasSgemmEx = (CublasSgemmEx)functions[5];
    this->cublasHgemmBatched = (CublasHgemmBatched)functions[6];
    this->cublasSgemmBatched = (CublasSgemmBatched)functions[7];
    this->cublasDgemmBatched = (CublasDgemmBatched)functions[8];
    */
}

/**
 * This method acquires memory chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 *
@param flags optional parameter */ Nd4jPointer NativeOps::mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // cudaHostAllocMapped |cudaHostAllocPortable cudaError_t res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize, cudaHostAllocDefault); if (res != 0) pointer = 0L; return pointer; } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer NativeOps::mallocDevice(Nd4jLong memorySize, Nd4jPointer ptrToDeviceId, int flags) { Nd4jPointer pointer; cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize); if (res != 0) pointer = 0L; return pointer; } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int NativeOps::freeHost(Nd4jPointer pointer) { cudaError_t res = cudaFreeHost(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) { cudaError_t res = cudaFree(reinterpret_cast<void *>(pointer)); if (res != 0) pointer = 0L; return 1L; } Nd4jPointer NativeOps::createContext() { return 0L; } Nd4jPointer NativeOps::createStream() { Nd4jPointer nativeStream = (Nd4jPointer) malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream"); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaStreamCreate(...) 
failed"); return nativeStream; } Nd4jPointer NativeOps::createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer"); cudaError_t dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEventCreateWithFlags(...) failed"); return nativeEvent; } int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) { cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event); cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream); cudaError_t dZ = cudaEventRecord(*pEvent, *pStream); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaEventRecord(...) failed"); return 1; } int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) { int deviceId = getDeviceId(ptrToDeviceId); cudaError_t dZ = cudaSetDevice(deviceId); checkCudaErrors(dZ); if (dZ != 0) throw std::runtime_error("cudaSetDevice(...) 
failed");
    return 1;
}

/**
 * Returns the amount of free memory (bytes) on the given device, temporarily
 * switching the current device if needed and restoring it afterwards.
 */
Nd4jLong NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jLong) memFree;
}

/**
 * Returns the total memory (bytes) on the given device, temporarily
 * switching the current device if needed and restoring it afterwards.
 */
Nd4jLong NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jLong) memTotal;
}

// Synchronous-looking wrapper; delegates to the async variant on the stream
// carried in `reserved`.
int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

/**
 * Issues cudaMemcpyAsync on the stream passed via `reserved`.
 * flags: 0 = HostToHost, 1 = HostToDevice, 2 = DeviceToHost, 3 = DeviceToDevice.
 */
int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaMemcpyKind kind;

    DEBUG_KERNEL(pStream, 0);

    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
        break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
        break;
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
        // FIX: missing break here made flags == 2 fall through and issue a
        // DeviceToDevice copy instead of DeviceToHost.
        break;
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
        break;
        default: {
            printf("UNDEFINED MEMCPY!\n");
            break;
        }
    }

    cudaError_t dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
    if (dZ != 0) {
        checkCudaErrors(dZ);
        // FIX: `size` is Nd4jLong — printing it via %i was varargs UB; use %lld.
        printf("Failed on [%lu] -> [%lu], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst, static_cast<long long>(size), flags, static_cast<int>(dZ));
        fflush(stdout);
        fflush(stderr);
        throw std::runtime_error("cudaMemcpyAsync(...)
failed");
        //return 0L;
    }

    return 1;
}

// Synchronous byte-wise fill of device memory; throws on failure.
int NativeOps::memset(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaError_t dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaMemset(...) failed");

    return 1;
}

// Asynchronous byte-wise fill on the stream passed via `reserved`; throws on failure.
int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaError_t dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaMemsetAsync(...) failed");

    return 1;
}

// Destroys a CUDA event previously created by createEvent; throws on failure.
// NOTE(review): the error message says "cudaEvenDestroy" upstream — kept verbatim.
int NativeOps::destroyEvent(Nd4jPointer event) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    cudaError_t dZ = cudaEventDestroy(*pEvent);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaEvenDestroy(...) failed");

    return 1;
}

// Blocks the host until all work queued on the given stream has completed.
int NativeOps::streamSynchronize(Nd4jPointer stream) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);

    cudaError_t dZ = cudaStreamSynchronize(*pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaStreamSynchronize(...) failed");

    return 1L;
}

// Blocks the host until the given event has been recorded.
int NativeOps::eventSynchronize(Nd4jPointer event) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);

    cudaError_t dZ = cudaEventSynchronize(*pEvent);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaEventSynchronize(...)
failed"); return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; cudaGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, Nd4jPointer *ddata, Nd4jPointer *dinputShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hXShapeInfo = hZShapeInfo; auto hShapePointers = reinterpret_cast<Nd4jLong **>(inputShapeInfo); // numArrays will be used as number of TADs, so each block process 1 input int smem = 8192; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hShapePointers[i])) { isScalar 
= false; break; } } if (!isScalar && dimension == 0 && shape::rank(hZShapeInfo) == 2 && shape::order(hZShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c') { isVstack = false; break; } } } // let's try to fit N-dimensional vstack if (!isVstack && !isScalar && dimension == 0 && shape::order(hXShapeInfo) == 'c') { auto length0 = shape::length(hShapePointers[0]); isVstack = true; for (int i = 0; i < numArrays; i++) { if (shape::elementWiseStride(hShapePointers[i]) <= 0 || shape::order(hShapePointers[i]) != 'c' || length0 != shape::length(hShapePointers[i])) { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hZShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hShapePointers[i]) || shape::elementWiseStride(hShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going scalar concat\n"); dim3 launchDims(128, 128, 16384); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_SINGLE_SELECTOR(zType, concatKernelScalarGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), dZ), LIBND4J_TYPES); } else if (isVstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going VStack concat\n"); dim3 launchDims(128, 512, 16384); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_SINGLE_SELECTOR(zType, concatKernelVStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES); } else if (isHstack) { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going HStack concat\n"); dim3 launchDims(128, 128, 16384); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); 
BUILD_SINGLE_SELECTOR(zType, concatKernelHStackGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo), LIBND4J_TYPES); } else { if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("Going generic concat\n"); auto devZTadShape = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto devZOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); dim3 launchDims(128, 128, 8192); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); BUILD_SINGLE_SELECTOR(zType, concatKernelGeneric, (launchDims, stream, numArrays, reinterpret_cast<Nd4jPointer *>(ddata[0]), reinterpret_cast<Nd4jPointer *>(dinputShapeInfo[0]), dZ, dZShapeInfo, reinterpret_cast<Nd4jPointer *>(tadPointers[0]), reinterpret_cast<Nd4jPointer *>(offsetPointers[0]), devZTadShape, devZOffsets), LIBND4J_TYPES); } if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); cudaError_t res = cudaStreamSynchronize(*stream); checkCudaErrors(res); nd4j::DebugHelper::checkErrorCode(stream, "Legacy ConcatFloat(...) 
failed");
}

void NativeOps::specialConcat(
        Nd4jPointer *extraPointers,
        int dimension,
        int numArrays,
        Nd4jPointer *data,
        Nd4jPointer *inputShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    nd4j::SpecialMethods<float>::concatCpuGeneric(
            dimension,
            numArrays,
            data,
            inputShapeInfo,
            dZ,
            dZShapeInfo);
}

/**
 * Builds the TAD-only shape info and offsets for the given shape/dimensions
 * and copies both into the caller-provided host buffers.
 */
void NativeOps::tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *target, Nd4jLong *offsets) {
    //nd4j_printf("START ------->\n","");
    //nd4j_printf("Shape pointer: [%p]\n", dXShapeInfo);
    //nd4j_printf("Dimension pointer: [%p]\n", dimension);
    //nd4j_printf("shape rank: [%i]; dimLength: [%i]\n", shape::rank(dXShapeInfo), dimensionLength);

    shape::TAD tad;
    tad.init(dXShapeInfo, dimension, dimensionLength);

    tad.createTadOnlyShapeInfo();
    tad.createOffsets();

    // copy the computed shape info and per-TAD offsets back to the host buffers
    std::memcpy(reinterpret_cast<void *>(target), tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
    std::memcpy(reinterpret_cast<void *>(offsets), tad.tadOffsets, tad.numTads * sizeof(Nd4jLong));
}

/**
 * Copies `size` bytes from `src` into device constant memory at offset `dst`,
 * asynchronously on the stream passed via `reserved`.
 * flags: 0 = HostToHost, 1 = HostToDevice, 2 = DeviceToHost, 3 = DeviceToDevice.
 */
int NativeOps::memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaMemcpyKind kind;

    DEBUG_KERNEL(pStream, -1);

    switch (flags) {
        case 0: {
            kind = cudaMemcpyHostToHost;
        }
        break;
        case 1: {
            kind = cudaMemcpyHostToDevice;
        }
        break;
        case 2: {
            kind = cudaMemcpyDeviceToHost;
        }
        // FIX: missing break here made flags == 2 fall through to DeviceToDevice.
        break;
        case 3: {
            kind = cudaMemcpyDeviceToDevice;
        }
        break;
    }
    // cudaError_t dZ = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    cudaError_t dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
    checkCudaErrors(dZ);
    if (dZ != 0)
        throw std::runtime_error("cudaMemcpyToSymbolAsync(...) failed");

    return 1;
}

// Returns the device address of the constant-memory symbol used by
// memcpyConstantAsync; throws if the symbol cannot be resolved.
Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);

    if (dZ != 0)
        throw std::runtime_error("cudaGetSymbolAddress(...) failed");

    return dConstAddr;
}

/**
 * Gathers `n` rows (TADs) of dX selected by `indexes` into dZ, using the
 * provided TAD shape/offset descriptors for source and destination.
 */
void NativeOps::pullRows(Nd4jPointer *extraPointers,
                         void *x, Nd4jLong *xShapeInfo,
                         void *dX, Nd4jLong *dXShapeInfo,
                         void *z, Nd4jLong *zShapeInfo,
                         void *dZ, Nd4jLong *dZShapeInfo,
                         Nd4jLong n,
                         Nd4jLong *indexes,
                         Nd4jLong *tadShapeInfo,
                         Nd4jLong *tadOffsets,
                         Nd4jLong *zTadShapeInfo,
                         Nd4jLong *zTadOffsets) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(64, 256, 1024);
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES);

    DEBUG_KERNEL(stream, -1);
}

/**
 * Averages `n` arrays of `length` elements. mode == 0 (from extras[3]) runs
 * the GPU kernel; otherwise falls back to the host implementation.
 */
void NativeOps::average(Nd4jPointer *extras,
                        Nd4jPointer *x, Nd4jLong *xShapeInfo,
                        Nd4jPointer *dx, Nd4jLong *dXShapeInfo,
                        void *z, Nd4jLong *zShapeInfo,
                        void *dz, Nd4jLong *dzShapeInfo,
                        int n,
                        Nd4jLong length,
                        bool propagate) {
    cudaStream_t * stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("averageFloat called\n");

    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
        dim3 launchDims(256, 256, 4096);
        // averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dX, dz, n, length, propagate);
        BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric,
(launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
    } else {
        // launching on host memory
        BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES);
    }
}

// Sums `n` arrays of `length` elements into dz. mode (from extras[3]) selects the path:
// 0 = GPU kernel (grid sized by n), anything else = host-side SpecialMethods fallback.
void NativeOps::accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("accumulateFloat called\n");

    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
        dim3 launchDims(n, 256, 16384); // one block per input array
        BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n,length), LIBND4J_TYPES);

        nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
    } else {
        // launching on host memory
        BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES);
    }
}

// Shuffles N arrays in-place/into dZ according to shuffleMap, one TAD table per array.
// Element type is taken from the first array's host shape-info (xShape[0]).
void NativeOps::shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);

    auto dX = reinterpret_cast<void **>(dx);
    auto dZ = reinterpret_cast<void **>(dz);
    auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo);
    auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo);
    auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
    auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);

    auto xType = nd4j::ArrayOptions::dataType(xShape[0]);
    dim3 launchDims(N, 256, 8192); // one block per array
    BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES);

    DEBUG_KERNEL(stream, 0);
}

/*
void NativeOps::execMetaPredicateShape(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraA, void *extraB, double scalarA, double scalarB) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, functions::grid::GRIDShaped, ::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB, opNumB, N, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraA, extraB, scalarA, scalarB), LIBND4J_TYPES);
    // functions::grid::GRIDShaped<float>::execMetaPredicateShaped(stream, extras, opTypeA, opNumA, opTypeB,
opNumB, N, dX, dXShapeInfo, dy, dYShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
*/

// True when this build was compiled with experimental-ops support.
bool NativeOps::isExperimentalEnabled() {
    return nd4j::Environment::getInstance()->isExperimentalBuild();
}

// Clamps the requested OMP minimum thread count to [32, maxThreads].
void NativeOps::setOmpMinThreads(int threads) {
    minThreads = nd4j::math::nd4j_max<int>(32, threads);
    minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads);
}

// Returns the currently active CUDA device id (-1 if cudaGetDevice did not set it).
int NativeOps::getDevice() {
    int curDevice = -1;
    cudaGetDevice(&curDevice);
    return curDevice;
}

void NativeOps::setElementThreshold(int num) {
    // this is no-op for CUDA
}

void NativeOps::setTADThreshold(int num) {
    // this is no-op for CUDA
}

// Whole-array summary statistics (scalar output). Z must be a floating-point type.
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    dim3 launchDims = dim3(256, 256, 32768);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    if (!DataTypeUtils::isR(zType))
        // NOTE(review): message names execReduce3 but this is execSummaryStats — likely copy/paste
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}

// Summary statistics reduced along the given dimension(s); TAD tables must be precomputed.
void NativeOps::execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int
dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    dim3 launchDims = dim3(256, 256, 32768);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);
    if (!DataTypeUtils::isR(zType))
        // NOTE(review): message names execReduce3 but this is execSummaryStats — likely copy/paste
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce, ::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected, reductionPointer), LIBND4J_TYPES, FLOAT_TYPES);
}

// Pairwise reduce3 (e.g. distance-style ops) over whole X/Y into a scalar-shaped Z.
// X and Y must share a data type; Z must be floating point.
void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    dim3 launchDims(256, 256, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    // dimension == nullptr with dimensionLength 1: full-array reduction
    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, nullptr, 1, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)

    DEBUG_KERNEL(stream, opNum);
}

////////////////////////////////////////////////////////////////////////
// Dimensional reduce3: reduces X/Y pairwise along `dimension`, one output per TAD.
// Grid is sized by the number of output elements (shape::length(hZShapeInfo)).
void NativeOps::execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    auto numBlocks = shape::length(hZShapeInfo);
    dim3 launchDims(numBlocks, 256, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3 requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), LIBND4J_TYPES, FLOAT_TYPES)
}

////////////////////////////////////////////////////////////////////////
// Scalar reduce3: reduces all of X/Y into a single value in Z.
void NativeOps::execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong
*hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // grid sized from input length and fixed block width
    auto xLength = shape::length(hXShapeInfo);
    auto blockWidth = 256;
    auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
    dim3 launchDims(numBlocks, blockWidth, 32768);

    if (xType != yType)
        throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Y operand to have X type", xType, yType);

    if (!DataTypeUtils::isR(zType))
        throw nd4j::datatype_exception::build("NativeOps::execReduce3Scalar requires Z operand to have floating point data type", zType);

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, allocationPointer, reductionPointer, nullptr), LIBND4J_TYPES, FLOAT_TYPES);
}

// Boolean-result scalar op: Z[i] = op(X[i], scalar). X and the scalar must share a type;
// Z must be BOOL.
void NativeOps::execScalarBool(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX, Nd4jLong *dXShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hScalar, Nd4jLong *hScalarShapeInfo,
        void *dScalar, Nd4jLong *dScalarShapeInfo,
        void *extraParams) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (xType != yType )
        throw std::runtime_error("NativeOps::execScalarBool requires X & Y to have same type");

    if (!DataTypeUtils::isB(zType) )
        throw std::runtime_error("NativeOps::execScalarBool requires Z operand to have BOOL type");

    BUILD_DOUBLE_SELECTOR(xType, zType, functions::scalar::ScalarBoolTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, BOOL_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Boolean-result scalar op applied along dimension(s): one scalar per TAD.
// NOTE(review): the tadShapeInfo/tadOffsets/tadShapeInfoZ/tadOffsetsZ parameters are
// accepted but nullptrs are forwarded instead — verify this is intentional. Also note
// the selector here pairs (xType, yType) while the scalar overload pairs (xType, zType).
void NativeOps::execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (xType != yType )
        throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires X & Y to have same type", xType, yType);

    if (!DataTypeUtils::isB(zType) )
        throw nd4j::datatype_exception::build("NativeOps::execScalarBool requires Z operand to have BOOL type", nd4j::DataType::BOOL, zType);

    BUILD_DOUBLE_SELECTOR(xType, yType, functions::scalar::ScalarBoolTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, nullptr, nullptr, nullptr, nullptr), LIBND4J_TYPES, BOOL_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Element-wise scalar op: Z[i] = op(X[i], scalar). Y may be BOOL or match X unless the
// experimental build allows mixed types.
void NativeOps::execScalar(
        Nd4jPointer *extraPointers,
        int opNum,
        void *hX, Nd4jLong *hXShapeInfo,
        void *dX,
Nd4jLong *dXShapeInfo,
        void *hZ, Nd4jLong *hZShapeInfo,
        void *dZ, Nd4jLong *dZShapeInfo,
        void *hScalar, Nd4jLong *hScalarShapeInfo,
        void *dScalar, Nd4jLong *dScalarShapeInfo,
        void *extraParams) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims(256, 512, 8192);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);

    // debug aid: trace the dtype triple on non-experimental debug builds
    if (!Environment::getInstance()->isExperimentalBuild() && Environment::getInstance()->isDebug()) {
        auto sX = DataTypeUtils::asString(xType);
        auto sY = DataTypeUtils::asString(yType);
        auto sZ = DataTypeUtils::asString(zType);

        nd4j_printf("Running execScalar with dtypes: [%s], [%s], [%s]\n", sX.c_str(), sY.c_str(), sZ.c_str());
    }

#ifdef __ND4J_EXPERIMENTAL__
    // experimental build: full (x, y, z) type cross-product
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    // standard build: x, y and z share one type
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo, hZShapeInfo, dScalar, extraParams), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Scalar op applied along dimension(s): one scalar from dScalars per TAD, with explicit
// TAD tables for input and output.
void NativeOps::execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
    auto dimension = reinterpret_cast<int *>(dDimension);
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled())
        throw nd4j::datatype_exception::build("NativeOps::execScalar both operands must have same data type", xType, yType);

    dim3 launchDims(256, 256, 16384);

#ifdef __ND4J_EXPERIMENTAL__
    BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
    BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);
}

// Dispatches a single aggregate op; launch geometry (blocks/threads/shmem) is read from
// extraPointers[2..4].
void NativeOps::execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numBlocks, numThreads, shmem);

    BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateKernelGeneric(launchDims, stream, opNum,
arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), FLOAT_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "execAggregateFloat(...) failed");
}

// Dispatches a batch of aggregate ops packed into ptrToArguments; grid sized by
// numAggregates, threads/shmem read from extraPointers[3..4].
void NativeOps::execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) {
    // not implemented yet
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numAggregates, numThreads, shmem);

    BUILD_SINGLE_SELECTOR(dtype, functions::aggregate::AggregatedFunction, ::aggregateBatchKernelGeneric(launchDims, stream, opNum, numAggregates, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), FLOAT_TYPES);

    DEBUG_KERNEL(stream, opNum);
}

// Single-array RNG op: copies the host RNG state to a temporary device allocation,
// runs the op, then copies the (advanced) state back so the host generator stays in sync.
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto sizeOf = sizeof(nd4j::graph::RandomGenerator);

    Nd4jPointer stateDevice;
    // NOTE(review): `res` is never checked — a failed cudaMalloc goes undetected here
    cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
    checkCudaErrors(cudaStreamSynchronize(*stream));
    checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));

    dim3 launchDims = dim3(512, 512, 32768);
    auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ, dZShapeInfo, extraArguments),
    BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction, ::executeCudaSingle(launchDims, extraPointers, opNum, stateDevice, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);

    checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
    checkCudaErrors(cudaStreamSynchronize(*stream));
    cudaFree(stateDevice);
}

// Two-array RNG op (X in, Z out); same state round-trip as the single-array overload.
// NOTE(review): `xType` is derived from hZShapeInfo, not hXShapeInfo — confirm intentional.
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    auto sizeOf = sizeof(nd4j::graph::RandomGenerator);

    Nd4jPointer stateDevice;
    // NOTE(review): `res` is never checked
    cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
    checkCudaErrors(cudaStreamSynchronize(*stream));
    checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));

    dim3 launchDims = dim3(512, 512, 32768);
    auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments);
    BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaDouble(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);

    checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
    checkCudaErrors(cudaStreamSynchronize(*stream));
    cudaFree(stateDevice);
}

// Three-array RNG op (X, Y in, Z out); same state round-trip as above.
// NOTE(review): `xType` is derived from hZShapeInfo here as well — confirm intentional.
void NativeOps::execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    auto sizeOf = sizeof(nd4j::graph::RandomGenerator);

    Nd4jPointer stateDevice;
    // NOTE(review): `res` is never checked
    cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&stateDevice), sizeOf);
    checkCudaErrors(cudaStreamSynchronize(*stream));
    checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));

    dim3 launchDims = dim3(512, 512, 32768);
    auto xType = nd4j::ArrayOptions::dataType(hZShapeInfo);

    // functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments);
    BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction, ::executeCudaTriple(launchDims, extraPointers, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments), FLOAT_TYPES);

    checkCudaErrors(cudaMemcpyAsync(stateHost, stateDevice, sizeOf, cudaMemcpyDeviceToHost, *stream));
    checkCudaErrors(cudaStreamSynchronize(*stream));
    cudaFree(stateDevice);
}

// Creates a legacy RandomBuffer backed by host (extraPointers[0]) and device
// (ptrToBuffer) storage, fills it on the host via Xoroshiro128 and uploads it.
// Returns the heap-allocated buffer as an opaque pointer; caller frees via destroyRandom.
Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    // we don't synchronize at random initialization, it's safe to go unsync here
    // cudaStreamSynchronize(*stream);

    auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
    auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
    buffer->propagateToDevice(buffer, *stream);

    nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");

    // we generate sequence in the host memory
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // and copy it to gpu
    cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
    nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");

    return buffer;
}

// Frees a RandomBuffer created by initRandom. Device-wide sync because the owning
// stream(s) are unknown at this point.
void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer);

    // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
    cudaDeviceSynchronize();

    delete buffer;
}

// Re-seeds the buffer, regenerates its contents on the host and uploads them to the GPU.
void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);

    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    cudaStreamSynchronize(*stream);

    uint64_t *ptrDev = buffer->getDeviceBuffer();

    // update rng state
    buffer->setSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);

    // refresh buffer on host size
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // copy back to gpu
    cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}

// Re-seeds the buffer state only (no host regeneration / upload of the sequence).
void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom);

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    cudaStreamSynchronize(*stream);

    // update rng state
    buffer->reSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);
}

/**
 * Return the length of a shape buffer
 * based on the pointer
 * @param buffer  the buffer pointer to check
 * @return
 */
int NativeOps::lengthForShapeBufferPointer(Nd4jPointer buffer) {
    auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
    return shape::shapeInfoLength(shape::rank(shapeBuffer));
}

/**
 * The pointer to get the address for
 *
 * @param address the address to get the pointer
 * @return the pointer for the given address
 */
Nd4jPointer NativeOps::pointerForAddress(Nd4jLong address)
{
    // reinterprets a raw integer address as an opaque pointer
    return reinterpret_cast<Nd4jPointer >(address);
}

// Splits dX into per-TAD views and writes them into the `targets` pointer array on the GPU.
void NativeOps::tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
    dim3 launchDims(512, 512, 512);
    auto xType = nd4j::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
}

// Recursive work-efficient prefix-scan (Blelloch-style, per the CUDA SDK "scan" sample):
// scans dX into dZ in blocks, then recursively scans the per-block sums held in
// g_scanBlockSums[level] and adds them back via uniformAdd. `level` indexes the
// pre-allocated block-sum arrays carried in extras[2].
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
    auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
    auto g_scanBlockSums = reinterpret_cast<int **>(&extras[2]);

    int blockSize = 512; // max size of the thread blocks
    int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
    int numThreads;

    if (numBlocks > 1)
        numThreads = blockSize;
    else if (nd4j::isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = nd4j::floorPow2(numElements);

    int numEltsPerBlock = numThreads * 2; // each thread scans two elements

    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
    int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock;
    int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2);
    int np2LastBlock = 0;
    int sharedMemLastBlock = 0;

    if (numEltsLastBlock != numEltsPerBlock) {
        np2LastBlock = 1;

        if(!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);

        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
    }

    // padding space is used to avoid shared memory bank conflicts
    int extraSpace = numEltsPerBlock / NUM_BANKS;
    int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);

    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);
    dim3 gridOnes(1, 1, 1);
    dim3 threadsOnes(numThreadsLastBlock, 1, 1);

    if (sharedMemSize < 2048)
        sharedMemSize = 2048;

    if (sharedMemLastBlock < 2048)
        sharedMemLastBlock = 2048;

    // execute the scan
    if (numBlocks > 1) {
        nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
        if (np2LastBlock) {
            nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
        }

        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
        // get the final results.

        // recursive (CPU) call
        prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);

        nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);

        if (np2LastBlock) {
            nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
        }
    } else if (isPowerOfTwo(numElements)) {
        nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
    } else {
        nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
    }
}

// Threshold-encoding phase 1: per-element pass over dx writing match info into dz.
void NativeOps::encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]);

    int blockSize = 1024;
    int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); // ceil(N / blockSize)

    dim3 launchDims(numBlocks, blockSize, 1024);
    auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES);

    nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed");
}

// Threshold-encoding phase 2: exclusive prefix scan of per-block counts (dx + 1 skips
// the leading length slot) into dz, via prescanArrayRecursive.
void NativeOps::encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz);
    prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0);
    nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed");
}

// Threshold-encoding phase 3: scatters encoded values into dz using the scanned offsets.
void NativeOps::encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int blockSize = 1024;
    int numBlocks = N / blockSize + (N % blockSize ?
1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } void NativeOps::decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) failed"); } void NativeOps::execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims(shape::length(hZShapeInfo), 256, 32768); if 
(nd4j::Environment::getInstance()->isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !this->isExperimentalEnabled()) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); if (yType != xType) throw nd4j::datatype_exception::build("NativeOps::execReduce3All both operands must have same data type", xType, yType); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3, ::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo, dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), LIBND4J_TYPES, FLOAT_TYPES); DEBUG_KERNEL(stream, opNum); } void NativeOps::sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[ 1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2*k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / 
numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window<<=1) { int n = window; int rev = 0; do{ int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n>>=1; rev = 1; } while(n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) failed"); } void NativeOps::sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { // to be implemented cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadFloat(...) 
failed"); } void NativeOps::sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong NativeOps::encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } void NativeOps::decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } Nd4jLong* NativeOps::mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void NativeOps::munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* NativeOps::executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { return nullptr; } const char* NativeOps::getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e]; auto array = new nd4j::NDArray(buffer_, shape_); array->triggerAllocationFlag(false, false); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.workspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* NativeOps::calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, 
Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(0); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; inputs[e] = new nd4j::NDArray(buffer, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, shape); outputs[e] = array; // and we want to release shape copy once we're done array->triggerAllocationFlag(false, true); } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } /* if (!isInplace) { if (dZ->size() != numOutputs) { return ND4J_STATUS_BAD_OUTPUT; } for (int e = 0; e < numOutputs; e++) { auto buffer = (T *) outputBuffers[e]; auto shape = (int *) outputShapes[e]; nd4j::NDArray<T> tmp(buffer, shape); if (tmp.lengthOf() != dZ->at(e)->lengthOf()) { nd4j_printf("Provided output array for [%s] has length of %i, but actual dZ has length of %i\n", op->getOpName()->c_str(), tmp.lengthOf(), dZ->at(e)->lengthOf()); return ND4J_STATUS_BAD_OUTPUT; } tmp.assign(dZ->at(e)); } } else { // if op is inplace, our ResultSet holds pointers dZ->purge(); } delete dZ; */ for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int NativeOps::execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int 
numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } int NativeOps::registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. 
values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* NativeOps::executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } int NativeOps::unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } void NativeOps::deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void NativeOps::deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void NativeOps::deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } template <typename T> static void deleteVariablesSetT(Nd4jPointer pointer) { nd4j::graph::VariablesSet* ptr = reinterpret_cast<nd4j::graph::VariablesSet*>(pointer); delete ptr; } void NativeOps::deleteVariablesSet(Nd4jPointer pointer) { deleteVariablesSetT<double>(pointer); } void NativeOps::deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); list->destroy(); delete list; } const char* NativeOps::getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer NativeOps::getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void NativeOps::deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, 
Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->workspace()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus NativeOps::execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, 
Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState*>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } void NativeOps::deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int NativeOps::estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void NativeOps::convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { 
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, 
float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) 
{ nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { 
nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } Nd4jPointer NativeOps::createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } void NativeOps::deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? 
blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void NativeOps::scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, int* hIindexes, int* dIndexes) { auto stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); nd4j::DataType type = ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(type, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, 
dYOffsets, dIndexes), LIBND4J_TYPES); }
e45ea20e2aa0b4605b08f2904c01a8272d8ede0d.hip
// !!! This is a file automatically generated by hipify!!! /* Fractal code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher */ #include <cstdio> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <hip/hip_runtime.h> static const int ThreadsPerBlock = 512; static __global__ void fractalKernal(const int width, const int frames, unsigned char* const pic) { const float Delta = 0.006; const float xMid = 0.232997; const float yMid = 0.550325; // compute frames const int pixels = frames * width * width; const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < pixels) { const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; const float delta = Delta * powf(0.985f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; const float cy = yMin + row * dw; const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2.0f * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0f)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } static void CheckCuda() { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.8\n"); // check command line if (argc != 3) {fprintf(stderr, "USAGE: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "ERROR: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "ERROR: num_frames must be at least 1\n"); exit(-1);} printf("frames: %d\n", frames); printf("width: %d\n", width); // allocate space for device copy const int n = frames * width * width; unsigned char* dev_pic; const int size = n * sizeof(unsigned char); hipMalloc((void **)&dev_pic, size); // allocate space for host copy unsigned char* pic = new unsigned 
char[n]; // copy inputs to device if (hipSuccess != hipMemcpy(dev_pic, pic, size, hipMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // call kernal hipLaunchKernelGGL(( fractalKernal), dim3((n + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, width, frames, dev_pic); hipDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.4f s\n", runtime); CheckCuda(); // copy back to host if (hipSuccess != hipMemcpy(pic, dev_pic, size, hipMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // write result to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; hipFree(dev_pic); return 0; }
e45ea20e2aa0b4605b08f2904c01a8272d8ede0d.cu
/* Fractal code for CS 4380 / CS 5351 Copyright (c) 2019 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Martin Burtscher */ #include <cstdio> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <cuda.h> static const int ThreadsPerBlock = 512; static __global__ void fractalKernal(const int width, const int frames, unsigned char* const pic) { const float Delta = 0.006; const float xMid = 0.232997; const float yMid = 0.550325; // compute frames const int pixels = frames * width * width; const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < pixels) { const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; const float delta = Delta * powf(0.985f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; const float cy = yMin + row * dw; const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2.0f * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0f)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } static void CheckCuda() { cudaError_t e; cudaDeviceSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.8\n"); // check command line if (argc != 3) {fprintf(stderr, "USAGE: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "ERROR: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "ERROR: num_frames must be at least 1\n"); exit(-1);} printf("frames: %d\n", frames); printf("width: %d\n", width); // allocate space for device copy const int n = frames * width * width; unsigned char* dev_pic; const int size = n * sizeof(unsigned char); cudaMalloc((void **)&dev_pic, size); // allocate space for host copy unsigned char* pic = new unsigned char[n]; 
// copy inputs to device if (cudaSuccess != cudaMemcpy(dev_pic, pic, size, cudaMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // call kernal fractalKernal<<< (n + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>> (width, frames, dev_pic); cudaDeviceSynchronize(); // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.4f s\n", runtime); CheckCuda(); // copy back to host if (cudaSuccess != cudaMemcpy(pic, dev_pic, size, cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // write result to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; cudaFree(dev_pic); return 0; }
f3d33e859fb1b155a7c8712dea739251278f0020.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "magnetic_orientation.h" #include <mirheo/core/pvs/rigid_object_vector.h> #include <mirheo/core/pvs/views/rov.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace MagneticOrientationPluginKernels { __global__ void applyMagneticField(ROVview view, real3 B, real3 M) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid >= view.nObjects) return; const auto q = static_cast<Quaternion<real>>(view.motions[gid].q); M = q.rotate(M); const real3 T = cross(M, B); atomicAdd(&view.motions[gid].torque.x, static_cast<RigidReal>(T.x)); atomicAdd(&view.motions[gid].torque.y, static_cast<RigidReal>(T.y)); atomicAdd(&view.motions[gid].torque.z, static_cast<RigidReal>(T.z)); } } // namespace MagneticOrientationPluginKernels MagneticOrientationPlugin::MagneticOrientationPlugin(const MirState *state, std::string name, std::string rovName, real3 moment, UniformMagneticFunc magneticFunction) : SimulationPlugin(state, name), rovName_(rovName), moment_(moment), magneticFunction_(magneticFunction) {} void MagneticOrientationPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) ); if (rov_ == nullptr) die("Need rigid object vector to interact with magnetic field, plugin '%s', OV name '%s'", getCName(), rovName_.c_str()); } void MagneticOrientationPlugin::beforeForces(hipStream_t stream) { ROVview view(rov_, rov_->local()); const int nthreads = 128; const auto t = getState()->currentTime; const auto B = magneticFunction_(t); SAFE_KERNEL_LAUNCH( MagneticOrientationPluginKernels::applyMagneticField, getNblocks(view.size, nthreads), nthreads, 0, stream, view, B, moment_); } } // 
namespace mirheo
f3d33e859fb1b155a7c8712dea739251278f0020.cu
#include "magnetic_orientation.h" #include <mirheo/core/pvs/rigid_object_vector.h> #include <mirheo/core/pvs/views/rov.h> #include <mirheo/core/simulation.h> #include <mirheo/core/utils/cuda_common.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace MagneticOrientationPluginKernels { __global__ void applyMagneticField(ROVview view, real3 B, real3 M) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid >= view.nObjects) return; const auto q = static_cast<Quaternion<real>>(view.motions[gid].q); M = q.rotate(M); const real3 T = cross(M, B); atomicAdd(&view.motions[gid].torque.x, static_cast<RigidReal>(T.x)); atomicAdd(&view.motions[gid].torque.y, static_cast<RigidReal>(T.y)); atomicAdd(&view.motions[gid].torque.z, static_cast<RigidReal>(T.z)); } } // namespace MagneticOrientationPluginKernels MagneticOrientationPlugin::MagneticOrientationPlugin(const MirState *state, std::string name, std::string rovName, real3 moment, UniformMagneticFunc magneticFunction) : SimulationPlugin(state, name), rovName_(rovName), moment_(moment), magneticFunction_(magneticFunction) {} void MagneticOrientationPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm) { SimulationPlugin::setup(simulation, comm, interComm); rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) ); if (rov_ == nullptr) die("Need rigid object vector to interact with magnetic field, plugin '%s', OV name '%s'", getCName(), rovName_.c_str()); } void MagneticOrientationPlugin::beforeForces(cudaStream_t stream) { ROVview view(rov_, rov_->local()); const int nthreads = 128; const auto t = getState()->currentTime; const auto B = magneticFunction_(t); SAFE_KERNEL_LAUNCH( MagneticOrientationPluginKernels::applyMagneticField, getNblocks(view.size, nthreads), nthreads, 0, stream, view, B, moment_); } } // namespace mirheo
2e65ddcb18030bbe0a1479d96ae472fe3869e4de.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> __global__ void kernel (void){ } void do_cuda_stuff() { hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, ); }
2e65ddcb18030bbe0a1479d96ae472fe3869e4de.cu
#include <cuda_runtime.h> __global__ void kernel (void){ } void do_cuda_stuff() { kernel<<<1,1>>>(); }
807c886c857ca8239a3718e1968efd6e46d4cd9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _NN_KERNEL_H_ #define _NN_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif __constant__ int kernelTemplate[25] = { 0, 1, 2, 3, 4, 29, 30, 31, 32, 33, 58, 59, 60, 61, 62, 87, 88, 89, 90, 91, 116,117,118,119,120 }; __global__ void executeFirstLayer(float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU) { int blockID=blockIdx.x; int pixelX=threadIdx.x; int pixelY=threadIdx.y; int weightBegin=blockID*26; int windowX=pixelX*2; int windowY=pixelY*2; float result=0; result+=Layer1_Weights_GPU[weightBegin]; ++weightBegin; for(int i=0;i<25;++i) { result+=Layer1_Neurons_GPU[(windowY*29+windowX+kernelTemplate[i])+(29*29*blockIdx.y)]*Layer1_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer2_Neurons_GPU[(13*13*blockID+pixelY*13+pixelX)+(13*13*6*blockIdx.y)]=result; } __constant__ int kernelTemplate2[25] = { 0, 1, 2, 3, 4, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30, 39, 40, 41, 42, 43, 52, 53, 54, 55, 56 }; __global__ void executeSecondLayer(float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU) { int blockID=blockIdx.x; int pixelX=threadIdx.x; int pixelY=threadIdx.y; int weightBegin=blockID*26*6; int windowX=pixelX*2; int windowY=pixelY*2; float result=0; result+=Layer2_Weights_GPU[weightBegin]; if(blockID==1 && pixelX==0 && pixelY==0) { result+=0; } ++weightBegin; for (int i=0; i<25; ++i ) { result+=Layer2_Neurons_GPU[(windowX + 13*windowY +kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6]; result+=Layer2_Neurons_GPU[(169 + windowX + 13*windowY +kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+1]; 
result+=Layer2_Neurons_GPU[(338 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+2]; result+=Layer2_Neurons_GPU[(507 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+3]; result+=Layer2_Neurons_GPU[(676 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+4]; result+=Layer2_Neurons_GPU[(845 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+5]; } result=(1.7159*tanhf(0.66666667*result)); Layer3_Neurons_GPU[(5*5*blockID+pixelY*5+pixelX)+(1250*blockIdx.y)]=result; } __global__ void executeThirdLayer(float *Layer3_Neurons_GPU, float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*1251; float result=0; result+=Layer3_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<1250; ++i ) { result+=Layer3_Neurons_GPU[i+(1250*blockIdx.y)]*Layer3_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer4_Neurons_GPU[blockID+(100*blockIdx.y)]=result; } __global__ void executeFourthLayer(float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*101; float result=0; result+=Layer4_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<100; ++i ) { result+=Layer4_Neurons_GPU[i+(100*blockIdx.y)]*Layer4_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer5_Neurons_GPU[blockID+(10*blockIdx.y)]=result; } #endif // #ifndef _NN_KERNEL_H_
807c886c857ca8239a3718e1968efd6e46d4cd9d.cu
#ifndef _NN_KERNEL_H_ #define _NN_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif __constant__ int kernelTemplate[25] = { 0, 1, 2, 3, 4, 29, 30, 31, 32, 33, 58, 59, 60, 61, 62, 87, 88, 89, 90, 91, 116,117,118,119,120 }; __global__ void executeFirstLayer(float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU) { int blockID=blockIdx.x; int pixelX=threadIdx.x; int pixelY=threadIdx.y; int weightBegin=blockID*26; int windowX=pixelX*2; int windowY=pixelY*2; float result=0; result+=Layer1_Weights_GPU[weightBegin]; ++weightBegin; for(int i=0;i<25;++i) { result+=Layer1_Neurons_GPU[(windowY*29+windowX+kernelTemplate[i])+(29*29*blockIdx.y)]*Layer1_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer2_Neurons_GPU[(13*13*blockID+pixelY*13+pixelX)+(13*13*6*blockIdx.y)]=result; } __constant__ int kernelTemplate2[25] = { 0, 1, 2, 3, 4, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30, 39, 40, 41, 42, 43, 52, 53, 54, 55, 56 }; __global__ void executeSecondLayer(float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU) { int blockID=blockIdx.x; int pixelX=threadIdx.x; int pixelY=threadIdx.y; int weightBegin=blockID*26*6; int windowX=pixelX*2; int windowY=pixelY*2; float result=0; result+=Layer2_Weights_GPU[weightBegin]; if(blockID==1 && pixelX==0 && pixelY==0) { result+=0; } ++weightBegin; for (int i=0; i<25; ++i ) { result+=Layer2_Neurons_GPU[(windowX + 13*windowY +kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6]; result+=Layer2_Neurons_GPU[(169 + windowX + 13*windowY +kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+1]; result+=Layer2_Neurons_GPU[(338 + windowX + 13*windowY + 
kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+2]; result+=Layer2_Neurons_GPU[(507 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+3]; result+=Layer2_Neurons_GPU[(676 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+4]; result+=Layer2_Neurons_GPU[(845 + windowX + 13*windowY + kernelTemplate2[i])+(13*13*6*blockIdx.y)]*Layer2_Weights_GPU[weightBegin+i*6+5]; } result=(1.7159*tanhf(0.66666667*result)); Layer3_Neurons_GPU[(5*5*blockID+pixelY*5+pixelX)+(1250*blockIdx.y)]=result; } __global__ void executeThirdLayer(float *Layer3_Neurons_GPU, float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*1251; float result=0; result+=Layer3_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<1250; ++i ) { result+=Layer3_Neurons_GPU[i+(1250*blockIdx.y)]*Layer3_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer4_Neurons_GPU[blockID+(100*blockIdx.y)]=result; } __global__ void executeFourthLayer(float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*101; float result=0; result+=Layer4_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<100; ++i ) { result+=Layer4_Neurons_GPU[i+(100*blockIdx.y)]*Layer4_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer5_Neurons_GPU[blockID+(10*blockIdx.y)]=result; } #endif // #ifndef _NN_KERNEL_H_
d349f6e14654d6e49f0c1a26e9ec05bd580acb45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/impl/FaissAssert.h> #include <algorithm> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/Tensor.cuh> namespace faiss { namespace gpu { template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad> __global__ void sumAlongColumns( Tensor<T, 1, true> input, Tensor<T, 2, true> output) { static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows"); // blockIdx.x: which chunk of rows we are responsible for updating // blockIdx.y: which chunk of columns we are responsible for // updating int rowStart = blockIdx.x * kRowsPerBlock; int rowEnd = rowStart + kRowsPerBlock; int colStart = blockIdx.y * blockDim.x * kColLoad; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool endCol = (blockIdx.y == gridDim.y - 1); if (endRow) { if (output.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endCol) { for (int col = colStart + threadIdx.x; col < input.getSize(0); col += blockDim.x) { T val = input[col]; if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { T out = output[row][col]; out = Math<T>::add(out, val); output[row][col] = out; } } else { T rows[kRowUnroll]; for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { rows[i] = output[row + i][col]; } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { rows[i] = Math<T>::add(rows[i], val); } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { output[row + i][col] = rows[i]; } } } } } else { int col = colStart + threadIdx.x; T val[kColLoad]; #pragma unroll for (int i = 0; i < kColLoad; ++i) { 
val[i] = input[col + i * blockDim.x]; } if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { #pragma unroll for (int i = 0; i < kColLoad; ++i) { T out = output[row][col + i * blockDim.x]; out = Math<T>::add(out, val[i]); output[row][col + i * blockDim.x] = out; } } } else { T rows[kRowUnroll * kColLoad]; for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { rows[i * kColLoad + j] = output[row + i][col + j * blockDim.x]; } } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { rows[i * kColLoad + j] = Math<T>::add(rows[i * kColLoad + j], val[j]); } } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { output[row + i][col + j * blockDim.x] = rows[i * kColLoad + j]; } } } } } } template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad> __global__ void assignAlongColumns( Tensor<T, 1, true> input, Tensor<T, 2, true> output) { static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows"); // blockIdx.x: which chunk of rows we are responsible for updating // blockIdx.y: which chunk of columns we are responsible for // updating int rowStart = blockIdx.x * kRowsPerBlock; int rowEnd = rowStart + kRowsPerBlock; int colStart = blockIdx.y * blockDim.x * kColLoad; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool endCol = (blockIdx.y == gridDim.y - 1); if (endRow) { if (output.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endCol) { for (int col = colStart + threadIdx.x; col < input.getSize(0); col += blockDim.x) { T val = input[col]; if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { output[row][col] = val; } } else { for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { output[row + 
i][col] = val; } } } } } else { int col = colStart + threadIdx.x; T val[kColLoad]; #pragma unroll for (int i = 0; i < kColLoad; ++i) { val[i] = input[col + i * blockDim.x]; } if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { #pragma unroll for (int i = 0; i < kColLoad; ++i) { output[row][col + i * blockDim.x] = val[i]; } } } else { for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { output[row + i][col + j * blockDim.x] = val[j]; } } } } } } template <typename T, bool ZeroClamp> __global__ void sumAlongRows( Tensor<T, 1, true> input, Tensor<T, 2, true> output) { __shared__ T sval; int row = blockIdx.x; if (threadIdx.x == 0) { sval = input[row]; } __syncthreads(); T val = sval; // FIXME: speed up for (int i = threadIdx.x; i < output.getSize(1); i += blockDim.x) { T out = output[row][i]; out = Math<T>::add(out, val); if (ZeroClamp) { out = Math<T>::lt(out, Math<T>::zero()) ? 
Math<T>::zero() : out; } output[row][i] = out; } } template <typename T, typename TVec> void runSumAlongColumns( Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, hipStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(1)); int threadsPerBlock = 256; constexpr int kRowUnroll = 4; constexpr int kRowsPerBlock = kRowUnroll * 4; constexpr int kColLoad = 4; auto block = dim3(threadsPerBlock); if (input.template canCastResize<TVec>() && output.template canCastResize<TVec>()) { auto inputV = input.template castResize<TVec>(); auto outputV = output.template castResize<TVec>(); auto grid = dim3( utils::divUp(outputV.getSize(0), kRowsPerBlock), utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad)); hipLaunchKernelGGL(( sumAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad>) , dim3(grid), dim3(block), 0, stream, inputV, outputV); } else { auto grid = dim3( utils::divUp(output.getSize(0), kRowsPerBlock), utils::divUp(output.getSize(1), threadsPerBlock * kColLoad)); hipLaunchKernelGGL(( sumAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad>) , dim3(grid), dim3(block), 0, stream, input, output); } CUDA_TEST_ERROR(); } void runSumAlongColumns( Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, hipStream_t stream) { runSumAlongColumns<float, float4>(input, output, stream); } void runSumAlongColumns( Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, hipStream_t stream) { runSumAlongColumns<half, half2>(input, output, stream); } template <typename T, typename TVec> void runAssignAlongColumns( Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, hipStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(1)); int threadsPerBlock = 256; constexpr int kRowUnroll = 4; constexpr int kRowsPerBlock = kRowUnroll * 4; constexpr int kColLoad = 4; auto block = dim3(threadsPerBlock); if (input.template canCastResize<TVec>() && output.template canCastResize<TVec>()) { auto inputV = input.template castResize<TVec>(); auto outputV 
= output.template castResize<TVec>(); auto grid = dim3( utils::divUp(outputV.getSize(0), kRowsPerBlock), utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad)); hipLaunchKernelGGL(( assignAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad>) , dim3(grid), dim3(block), 0, stream, inputV, outputV); } else { auto grid = dim3( utils::divUp(output.getSize(0), kRowsPerBlock), utils::divUp(output.getSize(1), threadsPerBlock * kColLoad)); hipLaunchKernelGGL(( assignAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad>) , dim3(grid), dim3(block), 0, stream, input, output); } CUDA_TEST_ERROR(); } void runAssignAlongColumns( Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, hipStream_t stream) { runAssignAlongColumns<float, float4>(input, output, stream); } void runAssignAlongColumns( Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, hipStream_t stream) { runAssignAlongColumns<half, half2>(input, output, stream); } template <typename T> void runSumAlongRows( Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, bool zeroClamp, hipStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(0)); int threadsPerBlock = ::min(output.getSize(1), getMaxThreadsCurrentDevice()); auto grid = dim3(output.getSize(0)); auto block = dim3(threadsPerBlock); if (zeroClamp) { hipLaunchKernelGGL(( sumAlongRows<T, true>), dim3(grid), dim3(block), 0, stream, input, output); } else { hipLaunchKernelGGL(( sumAlongRows<T, false>), dim3(grid), dim3(block), 0, stream, input, output); } CUDA_TEST_ERROR(); } void runSumAlongRows( Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, bool zeroClamp, hipStream_t stream) { runSumAlongRows<float>(input, output, zeroClamp, stream); } void runSumAlongRows( Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, bool zeroClamp, hipStream_t stream) { runSumAlongRows<half>(input, output, zeroClamp, stream); } } // namespace gpu } // namespace faiss
d349f6e14654d6e49f0c1a26e9ec05bd580acb45.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/impl/FaissAssert.h>
#include <algorithm>

#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/Tensor.cuh>

namespace faiss {
namespace gpu {

// Adds the 1-d `input` vector (one value per column) into every row of the
// 2-d `output`: output[row][col] += input[col].
//
// Grid layout (see runSumAlongColumns):
//   blockIdx.x selects a chunk of kRowsPerBlock rows;
//   blockIdx.y selects a chunk of blockDim.x * kColLoad columns.
// The last row/column blocks (endRow/endCol) fall back to bounds-checked
// loops; interior blocks use the kRowUnroll x kColLoad register-blocked path.
template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad>
__global__ void sumAlongColumns(
        Tensor<T, 1, true> input,
        Tensor<T, 2, true> output) {
    // The unrolled inner loops require the row chunk to be an exact
    // multiple of the unroll factor
    static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows");

    // blockIdx.x: which chunk of rows we are responsible for updating
    // blockIdx.y: which chunk of columns we are responsible for
    // updating
    int rowStart = blockIdx.x * kRowsPerBlock;
    int rowEnd = rowStart + kRowsPerBlock;
    int colStart = blockIdx.y * blockDim.x * kColLoad;

    // FIXME: if we have exact multiples, don't need this
    bool endRow = (blockIdx.x == gridDim.x - 1);
    bool endCol = (blockIdx.y == gridDim.y - 1);

    if (endRow) {
        // The last row block still covers full kRowsPerBlock rows when the
        // row count divides evenly, so it can take the fast path
        if (output.getSize(0) % kRowsPerBlock == 0) {
            endRow = false;
        }
    }

    if (endCol) {
        // Tail column block: stride over the remaining columns with bounds
        // checks, one column value per iteration
        for (int col = colStart + threadIdx.x; col < input.getSize(0);
             col += blockDim.x) {
            T val = input[col];

            if (endRow) {
                // Tail rows as well: plain bounds-checked row loop
                for (int row = rowStart; row < output.getSize(0); ++row) {
                    T out = output[row][col];
                    out = Math<T>::add(out, val);
                    output[row][col] = out;
                }
            } else {
                // Full row chunk: read/modify/write kRowUnroll rows at a
                // time through registers
                T rows[kRowUnroll];

                for (int row = rowStart; row < rowEnd; row += kRowUnroll) {
#pragma unroll
                    for (int i = 0; i < kRowUnroll; ++i) {
                        rows[i] = output[row + i][col];
                    }
#pragma unroll
                    for (int i = 0; i < kRowUnroll; ++i) {
                        rows[i] = Math<T>::add(rows[i], val);
                    }
#pragma unroll
                    for (int i = 0; i < kRowUnroll; ++i) {
                        output[row + i][col] = rows[i];
                    }
                }
            }
        }
    } else {
        // Interior column block: each thread owns kColLoad columns spaced
        // blockDim.x apart (keeps per-iteration accesses coalesced)
        int col = colStart + threadIdx.x;

        T val[kColLoad];

#pragma unroll
        for (int i = 0; i < kColLoad; ++i) {
            val[i] = input[col + i * blockDim.x];
        }

        if (endRow) {
            // Bounds-checked row loop, but still kColLoad columns per thread
            for (int row = rowStart; row < output.getSize(0); ++row) {
#pragma unroll
                for (int i = 0; i < kColLoad; ++i) {
                    T out = output[row][col + i * blockDim.x];
                    out = Math<T>::add(out, val[i]);
                    output[row][col + i * blockDim.x] = out;
                }
            }
        } else {
            // Fully unrolled kRowUnroll x kColLoad register tile:
            // load, add, store in three separate unrolled passes
            T rows[kRowUnroll * kColLoad];

            for (int row = rowStart; row < rowEnd; row += kRowUnroll) {
#pragma unroll
                for (int i = 0; i < kRowUnroll; ++i) {
#pragma unroll
                    for (int j = 0; j < kColLoad; ++j) {
                        rows[i * kColLoad + j] =
                                output[row + i][col + j * blockDim.x];
                    }
                }
#pragma unroll
                for (int i = 0; i < kRowUnroll; ++i) {
#pragma unroll
                    for (int j = 0; j < kColLoad; ++j) {
                        rows[i * kColLoad + j] =
                                Math<T>::add(rows[i * kColLoad + j], val[j]);
                    }
                }
#pragma unroll
                for (int i = 0; i < kRowUnroll; ++i) {
#pragma unroll
                    for (int j = 0; j < kColLoad; ++j) {
                        output[row + i][col + j * blockDim.x] =
                                rows[i * kColLoad + j];
                    }
                }
            }
        }
    }
}

// Writes the 1-d `input` vector into every row of the 2-d `output`:
// output[row][col] = input[col].
//
// Same grid layout and tail handling as sumAlongColumns above, but since
// this is a pure store (no read-modify-write) the interior path needs no
// register tile for the output.
template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad>
__global__ void assignAlongColumns(
        Tensor<T, 1, true> input,
        Tensor<T, 2, true> output) {
    // The unrolled inner loops require the row chunk to be an exact
    // multiple of the unroll factor
    static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows");

    // blockIdx.x: which chunk of rows we are responsible for updating
    // blockIdx.y: which chunk of columns we are responsible for
    // updating
    int rowStart = blockIdx.x * kRowsPerBlock;
    int rowEnd = rowStart + kRowsPerBlock;
    int colStart = blockIdx.y * blockDim.x * kColLoad;

    // FIXME: if we have exact multiples, don't need this
    bool endRow = (blockIdx.x == gridDim.x - 1);
    bool endCol = (blockIdx.y == gridDim.y - 1);

    if (endRow) {
        // Last row block may still be a full chunk
        if (output.getSize(0) % kRowsPerBlock == 0) {
            endRow = false;
        }
    }

    if (endCol) {
        // Tail column block: bounds-checked column strides
        for (int col = colStart + threadIdx.x; col < input.getSize(0);
             col += blockDim.x) {
            T val = input[col];

            if (endRow) {
                for (int row = rowStart; row < output.getSize(0); ++row) {
                    output[row][col] = val;
                }
            } else {
                for (int row = rowStart; row < rowEnd; row += kRowUnroll) {
#pragma unroll
                    for (int i = 0; i < kRowUnroll; ++i) {
                        output[row + i][col] = val;
                    }
                }
            }
        }
    } else {
        // Interior column block: each thread stores kColLoad columns spaced
        // blockDim.x apart
        int col = colStart + threadIdx.x;

        T val[kColLoad];

#pragma unroll
        for (int i = 0; i < kColLoad; ++i) {
            val[i] = input[col + i * blockDim.x];
        }

        if (endRow) {
            for (int row = rowStart; row < output.getSize(0); ++row) {
#pragma unroll
                for (int i = 0; i < kColLoad; ++i) {
                    output[row][col + i * blockDim.x] = val[i];
                }
            }
        } else {
            for (int row = rowStart; row < rowEnd; row += kRowUnroll) {
#pragma unroll
                for (int i = 0; i < kRowUnroll; ++i) {
#pragma unroll
                    for (int j = 0; j < kColLoad; ++j) {
                        output[row + i][col + j * blockDim.x] = val[j];
                    }
                }
            }
        }
    }
}

// Adds input[row] to every element of output's row `row`:
// output[row][i] += input[row]; if ZeroClamp, negative results are clamped
// to zero.
//
// Grid layout (see runSumAlongRows): one block per row (blockIdx.x == row);
// the block's threads stride across that row's columns.
template <typename T, bool ZeroClamp>
__global__ void sumAlongRows(
        Tensor<T, 1, true> input,
        Tensor<T, 2, true> output) {
    // Broadcast the row's addend via shared memory: one thread loads it,
    // all threads read it after the barrier
    __shared__ T sval;

    int row = blockIdx.x;

    if (threadIdx.x == 0) {
        sval = input[row];
    }

    // Barrier is reached by all threads in the block (the preceding branch
    // does not contain it), then all read the broadcast value
    __syncthreads();

    T val = sval;

    // FIXME: speed up
    for (int i = threadIdx.x; i < output.getSize(1); i += blockDim.x) {
        T out = output[row][i];
        out = Math<T>::add(out, val);

        if (ZeroClamp) {
            // Clamp negative sums to zero (compile-time selected path)
            out = Math<T>::lt(out, Math<T>::zero()) ? Math<T>::zero() : out;
        }

        output[row][i] = out;
    }
}

// Host launcher for sumAlongColumns.
// input:  length must equal output's column count (asserted).
// When both tensors can be reinterpreted as the vector type TVec
// (alignment/size permitting), launches the vectorized kernel for wider,
// fewer memory transactions; otherwise falls back to the scalar type T.
// Asynchronous with respect to the host; runs on `stream`.
template <typename T, typename TVec>
void runSumAlongColumns(
        Tensor<T, 1, true>& input,
        Tensor<T, 2, true>& output,
        cudaStream_t stream) {
    FAISS_ASSERT(input.getSize(0) == output.getSize(1));

    // Tuning constants; must satisfy the kernel's static_assert
    // (kRowsPerBlock % kRowUnroll == 0)
    int threadsPerBlock = 256;
    constexpr int kRowUnroll = 4;
    constexpr int kRowsPerBlock = kRowUnroll * 4;
    constexpr int kColLoad = 4;

    auto block = dim3(threadsPerBlock);

    if (input.template canCastResize<TVec>() &&
        output.template canCastResize<TVec>()) {
        // Vectorized path: operate on TVec elements (e.g. float4/half2)
        auto inputV = input.template castResize<TVec>();
        auto outputV = output.template castResize<TVec>();

        // x: row chunks, y: column chunks (matches kernel's expectations)
        auto grid = dim3(
                utils::divUp(outputV.getSize(0), kRowsPerBlock),
                utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad));

        sumAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad>
                <<<grid, block, 0, stream>>>(inputV, outputV);
    } else {
        // Scalar fallback
        auto grid = dim3(
                utils::divUp(output.getSize(0), kRowsPerBlock),
                utils::divUp(output.getSize(1), threadsPerBlock * kColLoad));

        sumAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad>
                <<<grid, block, 0, stream>>>(input, output);
    }

    // Surface launch errors (debug-time check)
    CUDA_TEST_ERROR();
}

// float32 instantiation; vectorizes as float4 when possible
void runSumAlongColumns(
        Tensor<float, 1, true>& input,
        Tensor<float, 2, true>& output,
        cudaStream_t stream) {
    runSumAlongColumns<float, float4>(input, output, stream);
}

// float16 instantiation; vectorizes as half2 when possible
void runSumAlongColumns(
        Tensor<half, 1, true>& input,
        Tensor<half, 2, true>& output,
        cudaStream_t stream) {
    runSumAlongColumns<half, half2>(input, output, stream);
}

// Host launcher for assignAlongColumns.
// input: length must equal output's column count (asserted).
// Same vectorization strategy and grid layout as runSumAlongColumns.
// Asynchronous with respect to the host; runs on `stream`.
template <typename T, typename TVec>
void runAssignAlongColumns(
        Tensor<T, 1, true>& input,
        Tensor<T, 2, true>& output,
        cudaStream_t stream) {
    FAISS_ASSERT(input.getSize(0) == output.getSize(1));

    // Tuning constants; must satisfy the kernel's static_assert
    int threadsPerBlock = 256;
    constexpr int kRowUnroll = 4;
    constexpr int kRowsPerBlock = kRowUnroll * 4;
    constexpr int kColLoad = 4;

    auto block = dim3(threadsPerBlock);

    if (input.template canCastResize<TVec>() &&
        output.template canCastResize<TVec>()) {
        // Vectorized path
        auto inputV = input.template castResize<TVec>();
        auto outputV = output.template castResize<TVec>();

        auto grid = dim3(
                utils::divUp(outputV.getSize(0), kRowsPerBlock),
                utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad));

        assignAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad>
                <<<grid, block, 0, stream>>>(inputV, outputV);
    } else {
        // Scalar fallback
        auto grid = dim3(
                utils::divUp(output.getSize(0), kRowsPerBlock),
                utils::divUp(output.getSize(1), threadsPerBlock * kColLoad));

        assignAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad>
                <<<grid, block, 0, stream>>>(input, output);
    }

    // Surface launch errors (debug-time check)
    CUDA_TEST_ERROR();
}

// float32 instantiation; vectorizes as float4 when possible
void runAssignAlongColumns(
        Tensor<float, 1, true>& input,
        Tensor<float, 2, true>& output,
        cudaStream_t stream) {
    runAssignAlongColumns<float, float4>(input, output, stream);
}

// float16 instantiation; vectorizes as half2 when possible
void runAssignAlongColumns(
        Tensor<half, 1, true>& input,
        Tensor<half, 2, true>& output,
        cudaStream_t stream) {
    runAssignAlongColumns<half, half2>(input, output, stream);
}

// Host launcher for sumAlongRows.
// input: length must equal output's row count (asserted).
// Launches one block per row; zeroClamp selects the compile-time
// ZeroClamp kernel variant (negative results clamped to zero).
// Asynchronous with respect to the host; runs on `stream`.
template <typename T>
void runSumAlongRows(
        Tensor<T, 1, true>& input,
        Tensor<T, 2, true>& output,
        bool zeroClamp,
        cudaStream_t stream) {
    FAISS_ASSERT(input.getSize(0) == output.getSize(0));

    // Cap block size at the device limit; one block covers one row
    int threadsPerBlock =
            std::min(output.getSize(1), getMaxThreadsCurrentDevice());
    auto grid = dim3(output.getSize(0));
    auto block = dim3(threadsPerBlock);

    if (zeroClamp) {
        sumAlongRows<T, true><<<grid, block, 0, stream>>>(input, output);
    } else {
        sumAlongRows<T, false><<<grid, block, 0, stream>>>(input, output);
    }

    // Surface launch errors (debug-time check)
    CUDA_TEST_ERROR();
}

// float32 instantiation
void runSumAlongRows(
        Tensor<float, 1, true>& input,
        Tensor<float, 2, true>& output,
        bool zeroClamp,
        cudaStream_t stream) {
    runSumAlongRows<float>(input, output, zeroClamp, stream);
}

// float16 instantiation
void runSumAlongRows(
        Tensor<half, 1, true>& input,
        Tensor<half, 2, true>& output,
        bool zeroClamp,
        cudaStream_t stream) {
    runSumAlongRows<half>(input, output, zeroClamp, stream);
}

} // namespace gpu
} // namespace faiss