hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
98b6302323c111c563702ca43db9c3825507f815.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdint.h> #include "cuStopwatch.cu" // Building lookup table for square of numbers in a stupid way __global__ void build_lookup() { __shared__ uint32_t lookup[1024]; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; // Build the table if(tid < 1024){ lookup[tid] = 0; for(uint32_t i = 0; i < tid * tid; i++) lookup[tid] += 1; __syncthreads(); // Check the table, there can be no zero entries if(tid < 32){ for(uint32_t i = 0; i < 1024; i+=32) if(lookup[tid+i]!=(tid+i)*(tid+i)) printf("Error on entry %u!\n", tid+i); } } return; } int main() { // Perform computation cuStopwatch sw1; sw1.start(); hipLaunchKernelGGL(( build_lookup), dim3(1), dim3(1024), 0, 0, ); printf("Computation time: %.4fms\n", sw1.stop()); // printf("%s", hipGetErrorString(hipPeekAtLastError())); return 0; }
98b6302323c111c563702ca43db9c3825507f815.cu
#include <stdio.h> #include <cuda_runtime.h> #include <stdint.h> #include "cuStopwatch.cu" // Building lookup table for square of numbers in a stupid way __global__ void build_lookup() { __shared__ uint32_t lookup[1024]; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; // Build the table if(tid < 1024){ lookup[tid] = 0; for(uint32_t i = 0; i < tid * tid; i++) lookup[tid] += 1; __syncthreads(); // Check the table, there can be no zero entries if(tid < 32){ for(uint32_t i = 0; i < 1024; i+=32) if(lookup[tid+i]!=(tid+i)*(tid+i)) printf("Error on entry %u!\n", tid+i); } } return; } int main() { // Perform computation cuStopwatch sw1; sw1.start(); build_lookup<<<1, 1024>>>(); printf("Computation time: %.4fms\n", sw1.stop()); // printf("%s", cudaGetErrorString(cudaPeekAtLastError())); return 0; }
c850af35cfbbde01ec2a56b53b132cc3b547c472.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "uniformRandom.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; hiprandState_t *states = NULL; hipMalloc(&states, XSIZE*YSIZE); float *d_values = NULL; hipMalloc(&d_values, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( uniformRandom), dim3(gridBlock),dim3(threadBlock), 0, 0, states,d_values); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( uniformRandom), dim3(gridBlock),dim3(threadBlock), 0, 0, states,d_values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( uniformRandom), dim3(gridBlock),dim3(threadBlock), 0, 0, states,d_values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c850af35cfbbde01ec2a56b53b132cc3b547c472.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "uniformRandom.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; curandState_t *states = NULL; cudaMalloc(&states, XSIZE*YSIZE); float *d_values = NULL; cudaMalloc(&d_values, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); uniformRandom<<<gridBlock,threadBlock>>>(states,d_values); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { uniformRandom<<<gridBlock,threadBlock>>>(states,d_values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { uniformRandom<<<gridBlock,threadBlock>>>(states,d_values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
437bfe41b8bd76f4e8e7bf043bdf4aea2bcb58b1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "postprocess_matrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; hipMalloc(&matrix, XSIZE*YSIZE); long *long_indices = NULL; hipMalloc(&long_indices, XSIZE*YSIZE); int *indices = NULL; hipMalloc(&indices, XSIZE*YSIZE); unsigned int N_POINTS = 1; unsigned int K = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( postprocess_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,long_indices,indices,N_POINTS,K); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( postprocess_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,long_indices,indices,N_POINTS,K); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( postprocess_matrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,long_indices,indices,N_POINTS,K); } 
auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
437bfe41b8bd76f4e8e7bf043bdf4aea2bcb58b1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "postprocess_matrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; cudaMalloc(&matrix, XSIZE*YSIZE); long *long_indices = NULL; cudaMalloc(&long_indices, XSIZE*YSIZE); int *indices = NULL; cudaMalloc(&indices, XSIZE*YSIZE); unsigned int N_POINTS = 1; unsigned int K = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); postprocess_matrix<<<gridBlock,threadBlock>>>(matrix,long_indices,indices,N_POINTS,K); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { postprocess_matrix<<<gridBlock,threadBlock>>>(matrix,long_indices,indices,N_POINTS,K); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { postprocess_matrix<<<gridBlock,threadBlock>>>(matrix,long_indices,indices,N_POINTS,K); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
399b0aa8ec61d82cb8afdf9edcf3fe72f612d1bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //- ======================================================================= //+ KTridiag CR algorithm (standalone version) //- ======================================================================= //---- Header Dependencies ----------------------------------------------- #include "tools/cudacl.hxx" #include "inc/op_copy.hxx" #include "inc/op_twiddle.hxx" #include "inc/op_mixr.hxx" #include "inc/op_reduce.hxx" #include "inc/op_load.hxx" #include "inc/op_shfl.hxx" #include "KLauncher3.hxx" #ifndef __CUDA_ARCH__ #define __CUDA_ARCH__ CUDA_ARCH #endif #if __CUDA_ARCH__ < 400 #define tabla triXf32A #endif #if __CUDA_ARCH__ >= 400 #define tabla triXf32B #endif //---- Butterfly operator ------------------------------------------------ template<class DTYPE, int size> struct butterfly { //- The first butterfly step is an optimized version of 'butterfly_step' static inline __device__ void init(Eqn<DTYPE>* E1, int s = 1); //- Generic butterfly step, is more efficient to call 'butterfly_init' first static inline __device__ void step(Eqn<DTYPE>* E1, int s = 1); }; template<class DTYPE> struct butterfly<DTYPE, 2> { //- The initial Rad<2> case is optimized static inline __device__ void init(Eqn<DTYPE>* E1, int s = 1) { } //- The general Rad<2> case is defined according to the equation static inline __device__ void step(Eqn<DTYPE>* E1, int s = 1) { Eqn<DTYPE> eq1 = reduce(E1[0],E1[1],2); Eqn<DTYPE> eqR = reduce(eq1,E1[2], 0); E1[1]=eqR; } }; //---- Radix operator ---------------------------------------------------- //- Generic radix stage, used in the main loop of the algorithm template<int RAD, class DTYPE> inline __device__ void radix(Eqn<DTYPE>* C) { butterfly<DTYPE, RAD>::step( C); } //- Mixed-radix stage, only called once before the main loop template<int SIZE, int RAD, class DTYPE> inline __device__ void radix(Eqn<DTYPE>* C) { #pragma unroll for(int i = 0; i < SIZE; i+= RAD) 
butterfly<DTYPE, RAD>::init(C+i); } __device__ int isMultipleOf(const int N, const int value) { int bit = ~0; int mask = bit<<(N); int mask2 = ~mask; if(!(value&mask2)) return 1; return 0; } //------- Cuda Kernels --------------------------------------------------- //- Kernel for tridiagonal equation systems that fit in Shared Memory //? Todo: Allow configurable 'stride' for other distributions // X in global memory, N larger than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride /*+ threadId*/; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; //? Test: What happens when allocating a ShMem array per coefficient __shared__ Eqn<DTYPE> shm[N > RAD ? SHM : 1]; // Load 'regC'. Left and right equations are initialized by 'radix_init' //? 
Test: ShMem exchange instead of consecutive load for larger types load<DTYPE, RAD>::col(regC, 'x', srcL + glbRPos); load<DTYPE, RAD>::col(regC, 'y', srcC + glbRPos); load<DTYPE, RAD>::col(regC, 'z', srcR + glbRPos); load<DTYPE, RAD>::col(regC, 'w', dstX + glbRPos); copy<RAD>(shm+RAD*threadId+shmOffset,1,regC); __syncthreads(); if(threadIdx.x!=(blockDim.x-1)) copy<1>(&regC[2],&shm[shmOffset+RAD*threadId+2],0);//No last thread else regC[2]=regC[1]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=32; const int warpAttack = warpSize; #pragma unroll for(int accRad = MIXRAD; accRad < (N/warpAttack); accRad *= RAD) { int cont = accRad; int strideW = cont; int indexW = strideW*threadId + strideW -1+shmOffset; int strideR = 2*cont; int indexR = strideR * threadId + strideR-1+shmOffset; if(cont > 1) __syncthreads(); if(threadId<num_threads) copy<1>(shm+indexW, 0, &regC[1]); __syncthreads(); num_threads/=2; if(threadId<num_threads) copy<3>(regC,shm+indexR-cont,cont,((indexR-shmOffset+cont)>=N)); radix<RAD>(regC); } if (threadId<warpSize) { //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution 
for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } dstX[glbWPos+((N/warpAttack))*(threadId+1)-1]=x; } __syncthreads(); num_threads=warpSize; #pragma unroll for (int j = N/warpAttack; j > 1; j/=2) { int delta = j/2; __syncthreads(); if (threadId < num_threads) { int d = glbWPos; int i = j * threadId + j/2 - 1; Eqn<DTYPE> eq; copy<1>(&eq,shm+shmOffset+i,1); if(i == delta - 1) dstX[d+i] = (eq.w - eq.z*dstX[d+i+delta])/eq.y; else dstX[d+i] = (eq.w - eq.x*dstX[d+i-delta] - eq.z*dstX[d+i+delta])/eq.y; } num_threads *= 2; } } // X in global memory, N smaller than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR2(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; if(threadId!=(blockDim.x-1)){ load<DTYPE, 3>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 3>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 3>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 3>::col(regC, 'w', dstX + glbRPos); } else { load<DTYPE, 2>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 2>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 2>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 2>::col(regC, 'w', dstX + glbRPos); regC[2]=regC[1]; } Eqn<DTYPE> eqUpInit = regC[0]; radix<RAD>(regC); // The 
first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=(N>32)?32:(N/RAD); const int warpAttack = warpSize; //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { // i is increasing by x2 // j is the logarithm of 2^x number Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); //if thread+1 is multiple of 2^j then it modifies the equation if(isMultipleOf(j,threadId+1)) { //Reduce ... Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } Eqn<DTYPE> eq = eqUpInit; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,1,warpSize); DTYPE x2 = (eq.w -eq.x*xUp - eq.z*x)/eq.y; dstX[glbWPos +((N/warpAttack))*(threadId+1)-2]=x2; dstX[glbWPos+((N/warpAttack))*(threadId+1)-1]=x; } // X is in SHM, N smaller than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR3(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = 
get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride /*+ threadId*/; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; __shared__ DTYPE X[N> RAD ? SHM : 1]; // Load 'regC'. Left and right equations are initialized by 'radix_init' if(threadId!=(blockDim.x-1)){ load<DTYPE, 3>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 3>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 3>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 3>::col(regC, 'w', dstX + glbRPos); } else { load<DTYPE, 2>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 2>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 2>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 2>::col(regC, 'w', dstX + glbRPos); regC[2]=regC[1]; } Eqn<DTYPE> eqUpInit = regC[0]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=(N>32)?32:N/RAD; const int warpAttack = warpSize; //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - 
eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //Substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } Eqn<DTYPE> eq = eqUpInit; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,1,warpSize); DTYPE x2 = (eq.w -eq.x*xUp - eq.z*x)/eq.y; X[shmOffset+((N/warpAttack))*(threadId+1)-2]=x2; X[shmOffset+((N/warpAttack))*(threadId+1)-1]=x; __syncthreads(); #pragma unroll for(int i=0;i<(RAD);i++) { dstX[glbWPos + threadId+ i * glbWStr]= X[shmOffset+threadId+i*glbWStr]; } } // X is in SHM, N is larger than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR4(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride ; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; __shared__ Eqn<DTYPE> shm[N > RAD ? SHM : 1]; __shared__ DTYPE X[N> RAD ? 
SHM : 1]; load<DTYPE, RAD>::col(regC, 'x', srcL + glbRPos); load<DTYPE, RAD>::col(regC, 'y', srcC + glbRPos); load<DTYPE, RAD>::col(regC, 'z', srcR + glbRPos); load<DTYPE, RAD>::col(regC, 'w', dstX + glbRPos); copy<RAD>(shm+RAD*threadId+shmOffset,1,regC); __syncthreads(); if(threadIdx.x!=(blockDim.x-1)) copy<1>(&regC[2],&shm[shmOffset+RAD*threadId+2],0);//No last thread else regC[2]=regC[1]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=32; const int warpAttack = warpSize; #pragma unroll for(int accRad = MIXRAD; accRad < (N/warpAttack); accRad *= RAD) { int cont = accRad; int strideW = cont; int indexW = strideW*threadId + strideW -1+shmOffset; int strideR = 2*cont; int indexR = strideR * threadId + strideR-1+shmOffset; if(cont > 1) __syncthreads(); if(threadId<num_threads) copy<1>(shm+indexW, 0, &regC[1]); __syncthreads(); num_threads/=2; if(threadId<num_threads) copy<3>(regC,shm+indexR-cont,cont,((indexR-shmOffset+cont)>=N)); radix<RAD>(regC); } if (threadId<warpSize) { int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown 
=shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } X[shmOffset+((N/warpAttack))*(threadId+1)-1]=x; } __syncthreads(); num_threads=warpSize; #pragma unroll for (int j = N/warpAttack; j > 1; j/=2) { int delta = j/2; __syncthreads(); if (threadId < num_threads) { int d = shmOffset; int i = j * threadId + j/2 - 1; Eqn<DTYPE> eq; copy<1>(&eq,shm+shmOffset+i,1); if(i == delta - 1) X[d+i]= (eq.w - eq.z*X[d+i+delta])/eq.y; else X[d+i]= (eq.w - eq.x*X[d+i-delta] - eq.z*X[d+i+delta])/eq.y; } num_threads *= 2; } __syncthreads(); #pragma unroll for(int i=0;i<(RAD);i++) { dstX[glbWPos + threadId+ i * glbWStr]= X[shmOffset+threadId+i*glbWStr]; } } // --- BranchTable ------------------------------------------------------- const static kernelCfg<float> triXf32A[] = { //! GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KTridiagCR2, float, 4, 128, 2), ROW(KTridiagCR2, float, 8, 256, 2), ROW(KTridiagCR2, float, 16, 256, 2), ROW(KTridiagCR2, float, 32, 256, 2), ROW(KTridiagCR2, float, 64, 256, 2), ROW(KTridiagCR, float, 128, 256, 2), ROW(KTridiagCR, float, 256, 256, 2), ROW(KTridiagCR, float, 512, 512, 2), ROW(KTridiagCR, float, 1024,1024, 2), NULL_ROW(4096), }; //Maxwell configuration const static kernelCfg<float> triXf32B[] = { //! 
GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KTridiagCR3, float, 4, 128, 2), ROW(KTridiagCR3, float, 8, 256, 2), ROW(KTridiagCR3, float, 16, 256, 2), ROW(KTridiagCR3, float, 32, 256, 2), ROW(KTridiagCR3, float, 64, 128, 2), ROW(KTridiagCR4, float, 128, 128, 2), ROW(KTridiagCR , float, 256, 256, 2), ROW(KTridiagCR , float, 512, 512, 2), ROW(KTridiagCR4, float, 1024,1024, 2), NULL_ROW(4096), }; //---- Interface Functions ----------------------------------------------- //- Main library function for 'float' equations int KTridiagCR(float* data, int dir, int N, int M, int batch) { if(N>1024) return -1; return KLauncher3(tabla, sizeof(tabla), data, dir, N, batch); }
399b0aa8ec61d82cb8afdf9edcf3fe72f612d1bc.cu
//- ======================================================================= //+ KTridiag CR algorithm (standalone version) //- ======================================================================= //---- Header Dependencies ----------------------------------------------- #include "tools/cudacl.hxx" #include "inc/op_copy.hxx" #include "inc/op_twiddle.hxx" #include "inc/op_mixr.hxx" #include "inc/op_reduce.hxx" #include "inc/op_load.hxx" #include "inc/op_shfl.hxx" #include "KLauncher3.hxx" #ifndef __CUDA_ARCH__ #define __CUDA_ARCH__ CUDA_ARCH #endif #if __CUDA_ARCH__ < 400 #define tabla triXf32A #endif #if __CUDA_ARCH__ >= 400 #define tabla triXf32B #endif //---- Butterfly operator ------------------------------------------------ template<class DTYPE, int size> struct butterfly { //- The first butterfly step is an optimized version of 'butterfly_step' static inline __device__ void init(Eqn<DTYPE>* E1, int s = 1); //- Generic butterfly step, is more efficient to call 'butterfly_init' first static inline __device__ void step(Eqn<DTYPE>* E1, int s = 1); }; template<class DTYPE> struct butterfly<DTYPE, 2> { //- The initial Rad<2> case is optimized static inline __device__ void init(Eqn<DTYPE>* E1, int s = 1) { } //- The general Rad<2> case is defined according to the equation static inline __device__ void step(Eqn<DTYPE>* E1, int s = 1) { Eqn<DTYPE> eq1 = reduce(E1[0],E1[1],2); Eqn<DTYPE> eqR = reduce(eq1,E1[2], 0); E1[1]=eqR; } }; //---- Radix operator ---------------------------------------------------- //- Generic radix stage, used in the main loop of the algorithm template<int RAD, class DTYPE> inline __device__ void radix(Eqn<DTYPE>* C) { butterfly<DTYPE, RAD>::step( C); } //- Mixed-radix stage, only called once before the main loop template<int SIZE, int RAD, class DTYPE> inline __device__ void radix(Eqn<DTYPE>* C) { #pragma unroll for(int i = 0; i < SIZE; i+= RAD) butterfly<DTYPE, RAD>::init(C+i); } __device__ int isMultipleOf(const int N, const int value) { int 
bit = ~0; int mask = bit<<(N); int mask2 = ~mask; if(!(value&mask2)) return 1; return 0; } //------- Cuda Kernels --------------------------------------------------- //- Kernel for tridiagonal equation systems that fit in Shared Memory //? Todo: Allow configurable 'stride' for other distributions // X in global memory, N larger than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride /*+ threadId*/; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; //? Test: What happens when allocating a ShMem array per coefficient __shared__ Eqn<DTYPE> shm[N > RAD ? SHM : 1]; // Load 'regC'. Left and right equations are initialized by 'radix_init' //? 
Test: ShMem exchange instead of consecutive load for larger types load<DTYPE, RAD>::col(regC, 'x', srcL + glbRPos); load<DTYPE, RAD>::col(regC, 'y', srcC + glbRPos); load<DTYPE, RAD>::col(regC, 'z', srcR + glbRPos); load<DTYPE, RAD>::col(regC, 'w', dstX + glbRPos); copy<RAD>(shm+RAD*threadId+shmOffset,1,regC); __syncthreads(); if(threadIdx.x!=(blockDim.x-1)) copy<1>(&regC[2],&shm[shmOffset+RAD*threadId+2],0);//No last thread else regC[2]=regC[1]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=32; const int warpAttack = warpSize; #pragma unroll for(int accRad = MIXRAD; accRad < (N/warpAttack); accRad *= RAD) { int cont = accRad; int strideW = cont; int indexW = strideW*threadId + strideW -1+shmOffset; int strideR = 2*cont; int indexR = strideR * threadId + strideR-1+shmOffset; if(cont > 1) __syncthreads(); if(threadId<num_threads) copy<1>(shm+indexW, 0, &regC[1]); __syncthreads(); num_threads/=2; if(threadId<num_threads) copy<3>(regC,shm+indexR-cont,cont,((indexR-shmOffset+cont)>=N)); radix<RAD>(regC); } if (threadId<warpSize) { //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution 
for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } dstX[glbWPos+((N/warpAttack))*(threadId+1)-1]=x; } __syncthreads(); num_threads=warpSize; #pragma unroll for (int j = N/warpAttack; j > 1; j/=2) { int delta = j/2; __syncthreads(); if (threadId < num_threads) { int d = glbWPos; int i = j * threadId + j/2 - 1; Eqn<DTYPE> eq; copy<1>(&eq,shm+shmOffset+i,1); if(i == delta - 1) dstX[d+i] = (eq.w - eq.z*dstX[d+i+delta])/eq.y; else dstX[d+i] = (eq.w - eq.x*dstX[d+i-delta] - eq.z*dstX[d+i+delta])/eq.y; } num_threads *= 2; } } // X in global memory, N smaller than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR2(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; if(threadId!=(blockDim.x-1)){ load<DTYPE, 3>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 3>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 3>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 3>::col(regC, 'w', dstX + glbRPos); } else { load<DTYPE, 2>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 2>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 2>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 2>::col(regC, 'w', dstX + glbRPos); regC[2]=regC[1]; } Eqn<DTYPE> eqUpInit = regC[0]; radix<RAD>(regC); // The 
first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=(N>32)?32:(N/RAD); const int warpAttack = warpSize; //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { // i is increasing by x2 // j is the logarithm of 2^x number Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); //if thread+1 is multiple of 2^j then it modifies the equation if(isMultipleOf(j,threadId+1)) { //Reduce ... Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } Eqn<DTYPE> eq = eqUpInit; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,1,warpSize); DTYPE x2 = (eq.w -eq.x*xUp - eq.z*x)/eq.y; dstX[glbWPos +((N/warpAttack))*(threadId+1)-2]=x2; dstX[glbWPos+((N/warpAttack))*(threadId+1)-1]=x; } // X is in SHM, N smaller than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR3(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = 
get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride /*+ threadId*/; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; __shared__ DTYPE X[N> RAD ? SHM : 1]; // Load 'regC'. Left and right equations are initialized by 'radix_init' if(threadId!=(blockDim.x-1)){ load<DTYPE, 3>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 3>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 3>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 3>::col(regC, 'w', dstX + glbRPos); } else { load<DTYPE, 2>::col(regC, 'x', srcL + glbRPos); load<DTYPE, 2>::col(regC, 'y', srcC + glbRPos); load<DTYPE, 2>::col(regC, 'z', srcR + glbRPos); load<DTYPE, 2>::col(regC, 'w', dstX + glbRPos); regC[2]=regC[1]; } Eqn<DTYPE> eqUpInit = regC[0]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=(N>32)?32:N/RAD; const int warpAttack = warpSize; //shuffling reduction int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } //exchanging Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - 
eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown =shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //Substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } Eqn<DTYPE> eq = eqUpInit; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,1,warpSize); DTYPE x2 = (eq.w -eq.x*xUp - eq.z*x)/eq.y; X[shmOffset+((N/warpAttack))*(threadId+1)-2]=x2; X[shmOffset+((N/warpAttack))*(threadId+1)-1]=x; __syncthreads(); #pragma unroll for(int i=0;i<(RAD);i++) { dstX[glbWPos + threadId+ i * glbWStr]= X[shmOffset+threadId+i*glbWStr]; } } // X is in SHM, N is larger than 64 template<class DTYPE, int N, int DIR, int RAD, int SHM> __global__ void KTridiagCR4(const DTYPE* __restrict__ srcL, const DTYPE* __restrict__ srcC, const DTYPE* __restrict__ srcR, DTYPE* dstX, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int verticalId = groupId * get_local_size(1) + batchId; // Offset for accesing thread data int shmOffset = batchId * N; int glbRPos = verticalId * stride + threadId * RAD; int glbWPos = verticalId * stride ; int glbWStr = N / RAD; // Statically allocate registers and shared memory Eqn<DTYPE> regC[3]; __shared__ Eqn<DTYPE> shm[N > RAD ? SHM : 1]; __shared__ DTYPE X[N> RAD ? 
SHM : 1]; load<DTYPE, RAD>::col(regC, 'x', srcL + glbRPos); load<DTYPE, RAD>::col(regC, 'y', srcC + glbRPos); load<DTYPE, RAD>::col(regC, 'z', srcR + glbRPos); load<DTYPE, RAD>::col(regC, 'w', dstX + glbRPos); copy<RAD>(shm+RAD*threadId+shmOffset,1,regC); __syncthreads(); if(threadIdx.x!=(blockDim.x-1)) copy<1>(&regC[2],&shm[shmOffset+RAD*threadId+2],0);//No last thread else regC[2]=regC[1]; radix<RAD>(regC); // The first radix stage is always executed const int MIXRAD = MIXR<N, RAD>::val; int num_threads= blockDim.x; const int warpSize=32; const int warpAttack = warpSize; #pragma unroll for(int accRad = MIXRAD; accRad < (N/warpAttack); accRad *= RAD) { int cont = accRad; int strideW = cont; int indexW = strideW*threadId + strideW -1+shmOffset; int strideR = 2*cont; int indexR = strideR * threadId + strideR-1+shmOffset; if(cont > 1) __syncthreads(); if(threadId<num_threads) copy<1>(shm+indexW, 0, &regC[1]); __syncthreads(); num_threads/=2; if(threadId<num_threads) copy<3>(regC,shm+indexR-cont,cont,((indexR-shmOffset+cont)>=N)); radix<RAD>(regC); } if (threadId<warpSize) { int i,j; for(i=1, j=1; i < (warpSize/2) ; i*=2,j++) { Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown =shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); if(isMultipleOf(j,threadId+1)) { Eqn<DTYPE> eq1 = reduce(EqUp,regC[1],2); Eqn<DTYPE> eqR = reduce(eq1,EqDown, 0); regC[1]=eqR; } } Eqn<DTYPE> EqUp = shfl<DTYPE>::shfl_Eq_Up(regC[1],i,warpSize); Eqn<DTYPE> EqDown = shfl<DTYPE>::shfl_Eq_Down(regC[1],i,warpSize); DTYPE x = 0; if(threadId==(warpSize/2 -1)) { Eqn<DTYPE> eqA, eqB; eqA=regC[1]; eqB=EqDown; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.y*eqA.w - eqA.z*eqB.w)/tmp; } if(threadId==(warpSize-1)) { Eqn<DTYPE> eqA, eqB; eqB = regC[1]; eqA = EqUp; DTYPE tmp = (eqB.y*eqA.y)-(eqA.z*eqB.x); x = (eqB.w*eqA.y - eqA.w*eqB.x)/tmp; } //shuffling substitution for(i=warpSize/4;i>0;i/=2) { j--; DTYPE xUp = shfl<DTYPE>::shfl_Up(x,i,warpSize); DTYPE xDown 
=shfl<DTYPE>::shfl_Down(x,i,warpSize); if(isMultipleOf(j,threadId-(i-1))) { //substitution Eqn<DTYPE> eq = regC[1]; x= (eq.w - eq.x*xUp - eq.z*xDown)/eq.y; } } X[shmOffset+((N/warpAttack))*(threadId+1)-1]=x; } __syncthreads(); num_threads=warpSize; #pragma unroll for (int j = N/warpAttack; j > 1; j/=2) { int delta = j/2; __syncthreads(); if (threadId < num_threads) { int d = shmOffset; int i = j * threadId + j/2 - 1; Eqn<DTYPE> eq; copy<1>(&eq,shm+shmOffset+i,1); if(i == delta - 1) X[d+i]= (eq.w - eq.z*X[d+i+delta])/eq.y; else X[d+i]= (eq.w - eq.x*X[d+i-delta] - eq.z*X[d+i+delta])/eq.y; } num_threads *= 2; } __syncthreads(); #pragma unroll for(int i=0;i<(RAD);i++) { dstX[glbWPos + threadId+ i * glbWStr]= X[shmOffset+threadId+i*glbWStr]; } } // --- BranchTable ------------------------------------------------------- const static kernelCfg<float> triXf32A[] = { //! GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KTridiagCR2, float, 4, 128, 2), ROW(KTridiagCR2, float, 8, 256, 2), ROW(KTridiagCR2, float, 16, 256, 2), ROW(KTridiagCR2, float, 32, 256, 2), ROW(KTridiagCR2, float, 64, 256, 2), ROW(KTridiagCR, float, 128, 256, 2), ROW(KTridiagCR, float, 256, 256, 2), ROW(KTridiagCR, float, 512, 512, 2), ROW(KTridiagCR, float, 1024,1024, 2), NULL_ROW(4096), }; //Maxwell configuration const static kernelCfg<float> triXf32B[] = { //! 
GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KTridiagCR3, float, 4, 128, 2), ROW(KTridiagCR3, float, 8, 256, 2), ROW(KTridiagCR3, float, 16, 256, 2), ROW(KTridiagCR3, float, 32, 256, 2), ROW(KTridiagCR3, float, 64, 128, 2), ROW(KTridiagCR4, float, 128, 128, 2), ROW(KTridiagCR , float, 256, 256, 2), ROW(KTridiagCR , float, 512, 512, 2), ROW(KTridiagCR4, float, 1024,1024, 2), NULL_ROW(4096), }; //---- Interface Functions ----------------------------------------------- //- Main library function for 'float' equations int KTridiagCR(float* data, int dir, int N, int M, int batch) { if(N>1024) return -1; return KLauncher3(tabla, sizeof(tabla), data, dir, N, batch); }
dbbc905c88e04ee1fd1dc2770df2f6b5eb1ab26e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Thinning.h" #include <iostream> using namespace std; static __global__ void _thinGpuIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } static __global__ void _thinGpuIter2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } __host__ int Thinning::thinGpu(Image *inimg, Image *outimg) { int errcode; hipError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; int *devchangecount = NULL; int changeCount; cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } Image *tempimg = NULL; errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; changeCount = 1; // int iter_num = 0; while (changeCount > 0) { // iter_num ++; changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuIter1Ker), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, tempsubimgCud, devchangecount, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = 
hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuIter2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } } // cout << "thinGH iter_num = " << iter_num << endl; hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuFourIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + 
(p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr > tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } static __global__ void _thinGpuFourIter2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr > tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } __host__ int Thinning::thinGpuFour(Image *inimg, Image *outimg) { int errcode; hipError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; Image *tempimg = NULL; int *devchangecount = NULL; int changeCount; cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) return CUDA_ERROR; errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) return errcode; ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) return errcode; dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) return CUDA_ERROR; cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) return CUDA_ERROR; hipLaunchKernelGGL(( _thinGpuFourIter1Ker), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, tempsubimgCud, devchangecount, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) return CUDA_ERROR; cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, 
outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) return CUDA_ERROR; hipLaunchKernelGGL(( _thinGpuFourIter2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devchangecount, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) return CUDA_ERROR; cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) return CUDA_ERROR; } hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuPtIter1Ker(ImageCuda outimg, ImageCuda tempimg, unsigned char *devlutthin, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } static __global__ void 
_thinGpuPtIter2Ker(ImageCuda tempimg, ImageCuda outimg, unsigned char *devlutthin, int *devchangecount, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *temptr; int curpos = dstr * outimg.pitchBytes + dstc; temptr = tempimg.imgMeta.imgData + curpos; if (*temptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } __host__ int Thinning::thinGpuPt (Image *inimg, Image *outimg) { int errcode; hipError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; unsigned char *devlutthin = NULL; Image *tempimg = NULL; int *devchangecount = NULL; cudaerrcode = hipMalloc((void **)&devlutthin, 512 * sizeof (unsigned char)); if (cudaerrcode != hipSuccess) return CUDA_ERROR; unsigned char lutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; cudaerrcode = hipMemcpy(devlutthin, lutthin, 512 * sizeof (unsigned char), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } int changeCount; cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } 
ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuPtIter1Ker), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, tempsubimgCud, devlutthin, devchangecount, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuPtIter2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devlutthin, devchangecount, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } } hipFree(devlutthin); hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuPtFourIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char *devlutthin, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= 
tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr >= tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { 
outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } static __global__ void _thinGpuPtFourIter2Ker(ImageCuda tempimg, ImageCuda outimg, unsigned char *devlutthin, int *devchangecount, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * outimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr >= tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 
255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } __host__ int Thinning::thinGpuPtFour (Image *inimg, Image *outimg) { int errcode; hipError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; unsigned char *devlutthin = NULL; Image *tempimg = NULL; int *devchangecount = NULL; cudaerrcode = hipMalloc((void **)&devlutthin, 512 * sizeof (unsigned char)); if (cudaerrcode != hipSuccess) return CUDA_ERROR; unsigned char lutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; cudaerrcode = hipMemcpy(devlutthin, lutthin, 512 * sizeof (unsigned char), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } int changeCount; cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), hipMemcpyHostToDevice); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuPtFourIter1Ker), dim3(gridsize), dim3(blocksize), 0, 0, 
outsubimgCud, tempsubimgCud, devchangecount, devlutthin, highPixel, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } hipLaunchKernelGGL(( _thinGpuPtFourIter2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, devlutthin, devchangecount, lowPixel); if (hipGetLastError() != hipSuccess) { return CUDA_ERROR; } cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), hipMemcpyDeviceToHost); if (cudaerrcode != hipSuccess) { return CUDA_ERROR; } } hipFree(devlutthin); hipFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // __constant__ unsigned char devlutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; // static __global__ void _thinGpuPtFourIter1Ker(ImageCuda outimg, // ImageCuda tempimg, // int *devchangecount, // unsigned char highpixel, // unsigned char lowpixel) // { // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = blockIdx.y * blockDim.y + threadIdx.y; // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= tempimg.imgMeta.height - 1) // return ; // curpos += tempimg.pitchBytes; // outptr = tempimg.imgMeta.imgData + 
curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // } // } // static __global__ void _thinGpuPtFourIter2Ker(ImageCuda tempimg, // ImageCuda outimg, // int *devchangecount, // unsigned char lowpixel) // { // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = blockIdx.y * blockDim.y + threadIdx.y; // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // unsigned char *outptr; // int curpos = dstr * outimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ 
row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index + 256]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= tempimg.imgMeta.height - 1) // return ; // curpos += tempimg.pitchBytes; // outptr = tempimg.imgMeta.imgData + curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index + 256]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // } // } // __host__ int Thinning::thinGpuPtFour (Image *inimg, Image *outimg) // { // int errcode; // hipError_t cudaerrcode; // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // unsigned char *devlutthin = NULL; // Image *tempimg = NULL; // int *devchangecount = NULL; // int changeCount; // cudaerrcode = hipMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = 
ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // return errcode; // } // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // return errcode; // } // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // return errcode; // } // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // return errcode; // } // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; // changeCount = 1; // while (changeCount > 0) { // changeCount = 0; // cudaerrcode = hipMemcpy(devchangecount, &changeCount, sizeof (int), // hipMemcpyHostToDevice); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // hipLaunchKernelGGL(( _thinGpuPtFourIter1Ker), dim3(gridsize), dim3(blocksize), 0, 0, outsubimgCud, tempsubimgCud, devchangecount, // highPixel, // lowPixel); // if (hipGetLastError() != hipSuccess) { // return CUDA_ERROR; // } // cudaerrcode = hipMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // hipLaunchKernelGGL(( _thinGpuPtFourIter2Ker), dim3(gridsize), dim3(blocksize), 0, 0, tempsubimgCud, outsubimgCud, // devchangecount, // lowPixel); // if (hipGetLastError() != hipSuccess) { // return CUDA_ERROR; // } // cudaerrcode = hipMemcpy(&changeCount, devchangecount, sizeof (int), // 
hipMemcpyDeviceToHost); // if (cudaerrcode != hipSuccess) { // return CUDA_ERROR; // } // } // hipFree(devlutthin); // hipFree(devchangecount); // ImageBasicOp::deleteImage(tempimg); // return NO_ERROR; // }
dbbc905c88e04ee1fd1dc2770df2f6b5eb1ab26e.cu
#include "Thinning.h" #include <iostream> using namespace std; static __global__ void _thinGpuIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } static __global__ void _thinGpuIter2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } __host__ int Thinning::thinGpu(Image *inimg, Image *outimg) { int errcode; cudaError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; int *devchangecount = NULL; int changeCount; cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } Image *tempimg = NULL; errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; changeCount = 1; // int iter_num = 0; while (changeCount > 0) { // iter_num ++; changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuIter1Ker<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud, devchangecount, highPixel, lowPixel); if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, 
tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuIter2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, highPixel, lowPixel); if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } } // cout << "thinGH iter_num = " << iter_num << endl; cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuFourIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | 
p7) + (p8 | p1); int N = N1 < N2 ? N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr > tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p6 | p7 | !p1) & p8); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } static __global__ void _thinGpuFourIter2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr > tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) + (!p6 & (p7 | p8)) + (!p8 & (p1 | p2)); int N1 = (p1 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8); int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p1); int N = N1 < N2 ? 
N1 : N2; int m = ((p2 | p3 | !p5) & p4); if (C == 1 && (N >= 2 && N <= 3) && m == 0) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } __host__ int Thinning::thinGpuFour(Image *inimg, Image *outimg) { int errcode; cudaError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; Image *tempimg = NULL; int *devchangecount = NULL; int changeCount; cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) return errcode; ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) return errcode; dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; _thinGpuFourIter1Ker<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud, devchangecount, highPixel, lowPixel); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, 
outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; _thinGpuFourIter2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devchangecount, highPixel, lowPixel); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; } cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuPtIter1Ker(ImageCuda outimg, ImageCuda tempimg, unsigned char *devlutthin, int *devchangecount, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == highpixel; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == highpixel; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == highpixel; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == highpixel; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == highpixel; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == highpixel; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == highpixel; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == highpixel; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } static __global__ void _thinGpuPtIter2Ker(ImageCuda tempimg, ImageCuda outimg, unsigned char 
*devlutthin, int *devchangecount, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *temptr; int curpos = dstr * outimg.pitchBytes + dstc; temptr = tempimg.imgMeta.imgData + curpos; if (*temptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } __host__ int Thinning::thinGpuPt (Image *inimg, Image *outimg) { int errcode; cudaError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; unsigned char *devlutthin = NULL; Image *tempimg = NULL; int *devchangecount = NULL; cudaerrcode = cudaMalloc((void **)&devlutthin, 512 * sizeof (unsigned char)); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; unsigned char lutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; cudaerrcode = cudaMemcpy(devlutthin, lutthin, 512 * sizeof (unsigned char), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } int changeCount; cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } ImageCuda tempsubimgCud; errcode = 
ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuPtIter1Ker<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud, devlutthin, devchangecount, highPixel, lowPixel); if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuPtIter2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devlutthin, devchangecount, lowPixel); if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } } cudaFree(devlutthin); cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } static __global__ void _thinGpuPtFourIter1Ker(ImageCuda outimg, ImageCuda tempimg, int *devchangecount, unsigned char *devlutthin, unsigned char highpixel, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos 
= dstr * tempimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr >= tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } static __global__ void 
_thinGpuPtFourIter2Ker(ImageCuda tempimg, ImageCuda outimg, unsigned char *devlutthin, int *devchangecount, unsigned char lowpixel) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; if (dstc >= tempimg.imgMeta.width - 1 || dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) return; unsigned char *outptr; int curpos = dstr * outimg.pitchBytes + dstc; outptr = tempimg.imgMeta.imgData + curpos; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } for (int i = 0; i < 3; ++i) { if (++dstr >= tempimg.imgMeta.height - 1) return ; curpos += tempimg.pitchBytes; outptr = tempimg.imgMeta.imgData + curpos ; if (*outptr != lowpixel) { int index = 0; int row1 = (dstr - 1) * tempimg.pitchBytes; int row2 = row1 + tempimg.pitchBytes; int row3 = row2 + tempimg.pitchBytes; unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; unsigned char p6 = 
tempimg.imgMeta.imgData[dstc+ row3] == 255; unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; if (devlutthin[index + 256]) { outimg.imgMeta.imgData[curpos] = lowpixel; *devchangecount = 1; } } } } __host__ int Thinning::thinGpuPtFour (Image *inimg, Image *outimg) { int errcode; cudaError_t cudaerrcode; if (inimg == NULL || outimg == NULL) return NULL_POINTER; unsigned char *devlutthin = NULL; Image *tempimg = NULL; int *devchangecount = NULL; cudaerrcode = cudaMalloc((void **)&devlutthin, 512 * sizeof (unsigned char)); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; unsigned char lutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; cudaerrcode = cudaMemcpy(devlutthin, lutthin, 512 * sizeof (unsigned char), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } int changeCount; cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); if (errcode != NO_ERROR) { return errcode; } ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } ImageCuda tempsubimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); if (errcode != NO_ERROR) { return errcode; } dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; changeCount = 1; while (changeCount > 0) { changeCount = 0; cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuPtFourIter1Ker<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud, devchangecount, devlutthin, highPixel, lowPixel); if (cudaGetLastError() != 
cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, outimg->imgData, outsubimgCud.deviceId, outsubimgCud.pitchBytes * outimg->height); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } _thinGpuPtFourIter2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, devlutthin, devchangecount, lowPixel); if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { return CUDA_ERROR; } } cudaFree(devlutthin); cudaFree(devchangecount); ImageBasicOp::deleteImage(tempimg); return NO_ERROR; } // __constant__ unsigned char devlutthin[] = { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0}; // static __global__ void _thinGpuPtFourIter1Ker(ImageCuda outimg, // ImageCuda tempimg, // int *devchangecount, // unsigned char highpixel, // unsigned char lowpixel) // { // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = blockIdx.y * blockDim.y + threadIdx.y; // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // unsigned char *outptr; // int curpos = dstr * tempimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= tempimg.imgMeta.height - 1) // return ; // curpos += tempimg.pitchBytes; // outptr = tempimg.imgMeta.imgData + curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + 
tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // } // } // static __global__ void _thinGpuPtFourIter2Ker(ImageCuda tempimg, // ImageCuda outimg, // int *devchangecount, // unsigned char lowpixel) // { // int dstc = blockIdx.x * blockDim.x + threadIdx.x; // int dstr = blockIdx.y * blockDim.y + threadIdx.y; // if (dstc >= tempimg.imgMeta.width - 1 || // dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1) // return; // unsigned char *outptr; // int curpos = dstr * outimg.pitchBytes + dstc; // outptr = tempimg.imgMeta.imgData + curpos; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = 
tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index + 256]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // for (int i = 0; i < 3; ++i) { // if (++dstr >= tempimg.imgMeta.height - 1) // return ; // curpos += tempimg.pitchBytes; // outptr = tempimg.imgMeta.imgData + curpos ; // if (*outptr != lowpixel) { // int index = 0; // int row1 = (dstr - 1) * tempimg.pitchBytes; // int row2 = row1 + tempimg.pitchBytes; // int row3 = row2 + tempimg.pitchBytes; // unsigned char p1 = tempimg.imgMeta.imgData[dstc-1 + row1] == 255; // unsigned char p2 = tempimg.imgMeta.imgData[dstc+ row1] == 255; // unsigned char p3 = tempimg.imgMeta.imgData[dstc+1 + row1] == 255; // unsigned char p4 = tempimg.imgMeta.imgData[dstc+1 + row2] == 255; // unsigned char p5 = tempimg.imgMeta.imgData[dstc+1 + row3] == 255; // unsigned char p6 = tempimg.imgMeta.imgData[dstc+ row3] == 255; // unsigned char p7 = tempimg.imgMeta.imgData[dstc-1 + row3] == 255; // unsigned char p8 = tempimg.imgMeta.imgData[dstc-1 + row2] == 255; // index = p1 * 1 + p2 * 2 + p3 * 4 + p4 * 8 + p5 * 16 + p6 * 32 + p7 * 64 + p8 * 128; // if (devlutthin[index + 256]) { // outimg.imgMeta.imgData[curpos] = lowpixel; // *devchangecount = 1; // } // } // } // } // __host__ int Thinning::thinGpuPtFour (Image *inimg, Image *outimg) // { // int errcode; // cudaError_t cudaerrcode; // if (inimg == NULL || outimg == NULL) // return NULL_POINTER; // unsigned char *devlutthin = NULL; // Image *tempimg = NULL; // int *devchangecount = NULL; // int changeCount; // cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // errcode = ImageBasicOp::newImage(&tempimg); // if (errcode != NO_ERROR) // return errcode; // errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, // inimg->height); // if (errcode != NO_ERROR) { // 
return errcode; // } // errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); // if (errcode != NO_ERROR) { // return errcode; // } // ImageCuda outsubimgCud; // errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // if (errcode != NO_ERROR) { // return errcode; // } // ImageCuda tempsubimgCud; // errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); // if (errcode != NO_ERROR) { // return errcode; // } // dim3 gridsize, blocksize; // blocksize.x = DEF_BLOCK_X; // blocksize.y = DEF_BLOCK_Y; // gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; // gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; // changeCount = 1; // while (changeCount > 0) { // changeCount = 0; // cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), // cudaMemcpyHostToDevice); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // _thinGpuPtFourIter1Ker<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud, devchangecount, // highPixel, // lowPixel); // if (cudaGetLastError() != cudaSuccess) { // return CUDA_ERROR; // } // cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, // outimg->imgData, outsubimgCud.deviceId, // outsubimgCud.pitchBytes * outimg->height); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // _thinGpuPtFourIter2Ker<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud, // devchangecount, // lowPixel); // if (cudaGetLastError() != cudaSuccess) { // return CUDA_ERROR; // } // cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), // cudaMemcpyDeviceToHost); // if (cudaerrcode != cudaSuccess) { // return CUDA_ERROR; // } // } // cudaFree(devlutthin); // cudaFree(devchangecount); // 
ImageBasicOp::deleteImage(tempimg); // return NO_ERROR; // }
6d999abc4ac61b15695efefc7cf565f2a6204b5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void convolution(float *N , float *M, float *P , int Tile_Size, int Mask_Width , int Width) { int k = blockIdx.x * blockDim.x + threadIdx.x; int l = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float N_ds[7][7]; int n = Mask_Width/2; int halo_index_left = (blockIdx.x - 1) * blockDim.x + threadIdx.x; int halo_index_top = (blockIdx.y - 1) * blockDim.y + threadIdx.y; int halo_index_right = (blockIdx.x +1) * blockDim.x + threadIdx.x; int halo_index_bottom = (blockIdx.y + 1) * blockDim.y + threadIdx.y; N_ds[n + threadIdx.y][n + threadIdx.x]= N[(blockIdx.y * blockDim.y + threadIdx.y)*Width + (blockIdx.x * blockDim.x + threadIdx.x)]; if(threadIdx.x >= blockDim.x-n && threadIdx.y >= blockDim.y - n) { N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0 || halo_index_top < 0)?0:N[halo_index_top* Width + halo_index_left]; N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left<0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left]; } if(threadIdx.x < n && threadIdx.y >= blockDim.y - n){ N_ds[threadIdx.y -(blockDim.y - n)][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_top < 0)?0:N[halo_index_top*Width + halo_index_right]; N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right]; } if(threadIdx.y < n && threadIdx.x >= blockDim.x - n){ N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_bottom >= Width || 
halo_index_left < 0)?0:N[halo_index_bottom*Width + halo_index_left]; N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left]; } if(threadIdx.x < n && threadIdx.y < n){ N_ds[n + blockDim.y + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + halo_index_right]; N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right]; } if(threadIdx.y < n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[n + blockDim.y + threadIdx.y][n + threadIdx.x] = (halo_index_bottom >=Width)?0:N[(halo_index_bottom*Width) + (blockIdx.x * blockDim.x + threadIdx.x)]; if(threadIdx.x < n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + (halo_index_right)]; if(threadIdx.y >= blockDim.y - n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[threadIdx.y -(blockDim.y - n)][n+threadIdx.x] = (halo_index_top < 0)?0:N[(halo_index_top*Width) + (blockDim.x*blockIdx.x + threadIdx.x)]; if(threadIdx.x >= blockDim.x - n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n+threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + halo_index_left]; __syncthreads(); float Pvalue = 0; for(int i =0; i < Mask_Width; i++){ for(int j =0; j < Mask_Width ; j++) { Pvalue += 
N_ds[threadIdx.y + i][threadIdx.x + j] * M[(i*Mask_Width) + j]; } } P[(l*Width) + k] = Pvalue; }
6d999abc4ac61b15695efefc7cf565f2a6204b5b.cu
__global__ void convolution(float *N , float *M, float *P , int Tile_Size, int Mask_Width , int Width) { int k = blockIdx.x * blockDim.x + threadIdx.x; int l = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float N_ds[7][7]; int n = Mask_Width/2; int halo_index_left = (blockIdx.x - 1) * blockDim.x + threadIdx.x; int halo_index_top = (blockIdx.y - 1) * blockDim.y + threadIdx.y; int halo_index_right = (blockIdx.x +1) * blockDim.x + threadIdx.x; int halo_index_bottom = (blockIdx.y + 1) * blockDim.y + threadIdx.y; N_ds[n + threadIdx.y][n + threadIdx.x]= N[(blockIdx.y * blockDim.y + threadIdx.y)*Width + (blockIdx.x * blockDim.x + threadIdx.x)]; if(threadIdx.x >= blockDim.x-n && threadIdx.y >= blockDim.y - n) { N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0 || halo_index_top < 0)?0:N[halo_index_top* Width + halo_index_left]; N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left<0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left]; } if(threadIdx.x < n && threadIdx.y >= blockDim.y - n){ N_ds[threadIdx.y -(blockDim.y - n)][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_top < 0)?0:N[halo_index_top*Width + halo_index_right]; N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right]; } if(threadIdx.y < n && threadIdx.x >= blockDim.x - n){ N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_bottom >= Width || halo_index_left < 0)?0:N[halo_index_bottom*Width + halo_index_left]; N_ds[n + blockDim.y + 
threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left]; } if(threadIdx.x < n && threadIdx.y < n){ N_ds[n + blockDim.y + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + halo_index_right]; N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)]; N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right]; } if(threadIdx.y < n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[n + blockDim.y + threadIdx.y][n + threadIdx.x] = (halo_index_bottom >=Width)?0:N[(halo_index_bottom*Width) + (blockIdx.x * blockDim.x + threadIdx.x)]; if(threadIdx.x < n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + (halo_index_right)]; if(threadIdx.y >= blockDim.y - n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[threadIdx.y -(blockDim.y - n)][n+threadIdx.x] = (halo_index_top < 0)?0:N[(halo_index_top*Width) + (blockDim.x*blockIdx.x + threadIdx.x)]; if(threadIdx.x >= blockDim.x - n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n+threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + halo_index_left]; __syncthreads(); float Pvalue = 0; for(int i =0; i < Mask_Width; i++){ for(int j =0; j < Mask_Width ; j++) { Pvalue += N_ds[threadIdx.y + i][threadIdx.x + j] * M[(i*Mask_Width) + j]; } } P[(l*Width) + k] = Pvalue; }
98c435b8ac08416a7246a9bb09d60d4e1d97e784.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "absolute_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void absolute_upd_kernel( const float4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = fabsf(val.x); val.y = fabsf(val.y); val.z = fabsf(val.z); val.w = fabsf(val.w); output[elem_id] = val; } } __global__ void absolute_deriviative_upd_kernel( float4 * __restrict input_errors, const float4 * __restrict output_errors, const float4 * __restrict input_neurons, bool add_update_to_destination, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 inp = input_neurons[elem_id]; float4 current_error = output_errors[elem_id]; if (inp.x < 0.0F) current_error.x = -current_error.x; if (inp.y < 0.0F) current_error.y = -current_error.y; if (inp.z < 0.0F) current_error.z = -current_error.z; if (inp.w < 0.0F) current_error.w = -current_error.w; float4 current_dst; if (add_update_to_destination) { current_dst = input_errors[elem_id]; current_error.x += current_dst.x; current_error.y += current_dst.y; current_error.z += current_dst.z; current_error.w += current_dst.w; } 
input_errors[elem_id] = current_error; } } void absolute_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( absolute_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_buffers[0], *output_buffer, elem_count); } void absolute_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr 
temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( absolute_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *output_errors_buffer, *input_neurons_buffers[0], add_update_to_destination, elem_count); } int absolute_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool absolute_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return true; } bool absolute_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } } }
98c435b8ac08416a7246a9bb09d60d4e1d97e784.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "absolute_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void absolute_upd_kernel( const float4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = fabsf(val.x); val.y = fabsf(val.y); val.z = fabsf(val.z); val.w = fabsf(val.w); output[elem_id] = val; } } __global__ void absolute_deriviative_upd_kernel( float4 * __restrict input_errors, const float4 * __restrict output_errors, const float4 * __restrict input_neurons, bool add_update_to_destination, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 inp = input_neurons[elem_id]; float4 current_error = output_errors[elem_id]; if (inp.x < 0.0F) current_error.x = -current_error.x; if (inp.y < 0.0F) current_error.y = -current_error.y; if (inp.z < 0.0F) current_error.z = -current_error.z; if (inp.w < 0.0F) current_error.w = -current_error.w; float4 current_dst; if (add_update_to_destination) { current_dst = input_errors[elem_id]; current_error.x += current_dst.x; current_error.y += current_dst.y; current_error.z += current_dst.z; current_error.w += current_dst.w; } input_errors[elem_id] = current_error; } } void 
absolute_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); absolute_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_buffers[0], *output_buffer, elem_count); } void absolute_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool 
add_update_to_destination, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); absolute_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *output_errors_buffer, *input_neurons_buffers[0], add_update_to_destination, elem_count); } int absolute_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool absolute_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return true; } bool absolute_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } } }
b2464233c2117fae997a42de4b656cc46dc1fcb6.hip
// !!! This is a file automatically generated by hipify!!! /** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <E.Rozenberg@cwi.nl> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). 
* */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "hipEventDefault", "hipEventBlockingSync", "hipEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "hipDeviceScheduleAuto", "hipDeviceScheduleSpin", "hipDeviceScheduleYield", "INVALID", "hipDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "hip/hip_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use hipHostMalloc to allocate system memory\n"; } int main(int argc, char **argv) { int cuda_device_id = 0; int 
nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using hipHostMalloc() int device_sync_method = hipDeviceScheduleBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; cuda_device_id = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device auto num_devices = cuda::device::count(); if ( 0 == num_devices) { std::cerr << "your system does not have a CUDA capable device, waiving test...\n"; return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device_id >= num_devices) { std::cout << "cuda_device=" << cuda_device_id << " is invalid, " << "must choose device ID between 0 and " << num_devices-1 << "\n"; return EXIT_FAILURE; } 
cuda::device::current::set(cuda_device_id); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major() >= 2) { niterations = 5; } else { if (compute_capability.minor() > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major() << "." 
<< compute_capability.minor() << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(cuda_device_id, n); auto d_c = cuda::memory::device::make_unique<int>(cuda_device_id); cuda::memory::copy_single(*d_c.get(), c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t<>> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == hipDeviceScheduleBlockingSync); auto start_event = cuda::event::create(current_device, 
use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0].id()); stop_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy.count() << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[0].id(), d_a.get(), d_c.get(), niterations); stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto time_kernel = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel.count() << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time.count() / nreps << "\n"; ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 
1s, for testing correctness cuda::memory::device::zero(d_a.get(), nbytes); // set device memory to all 0s, for testing correctness start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[i].id(), d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i].id()); } } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time.count() / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
b2464233c2117fae997a42de4b656cc46dc1fcb6.cu
/** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <E.Rozenberg@cwi.nl> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). 
* */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "cudaEventDefault", "cudaEventBlockingSync", "cudaEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "cudaDeviceScheduleAuto", "cudaDeviceScheduleSpin", "cudaDeviceScheduleYield", "INVALID", "cudaDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "cuda_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate system memory\n"; } int main(int argc, char **argv) { int cuda_device_id = 
0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using cudaHostAlloc() int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; cuda_device_id = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device auto num_devices = cuda::device::count(); if ( 0 == num_devices) { std::cerr << "your system does not have a CUDA capable device, waiving test...\n"; return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device_id >= num_devices) { std::cout << "cuda_device=" << cuda_device_id << " is invalid, " << "must choose device ID between 0 and " << num_devices-1 << "\n"; return EXIT_FAILURE; } 
cuda::device::current::set(cuda_device_id); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major() >= 2) { niterations = 5; } else { if (compute_capability.minor() > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major() << "." 
<< compute_capability.minor() << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(cuda_device_id, n); auto d_c = cuda::memory::device::make_unique<int>(cuda_device_id); cuda::memory::copy_single(*d_c.get(), c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t<>> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == cudaDeviceBlockingSync); auto start_event = cuda::event::create(current_device, 
use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0].id()); stop_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy.count() << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); init_array<<<blocks, threads, 0, streams[0].id()>>>(d_a.get(), d_c.get(), niterations); stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto time_kernel = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel.count() << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { init_array<<<blocks, threads>>>(d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time.count() / nreps << "\n"; ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 1s, for testing correctness cuda::memory::device::zero(d_a.get(), 
nbytes); // set device memory to all 0s, for testing correctness start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { init_array<<<blocks, threads, 0, streams[i].id()>>>(d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i].id()); } } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time.count() / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
4152dc2c72d114cafd4585cb55414062613957da.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void checkIndex(void){ printf("ThreadIdx: (%d,%d,%d) BlockIdx: (%d,%d,%d) BockDim: (%d,%d,%d) gridDim: (%d,%d,%d)\n", \ threadIdx.x,threadIdx.y,threadIdx.z, blockIdx.x,blockIdx.y,blockIdx.z, blockDim.x,blockDim.y,blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main(int argc, char** argv){ int nElem = 6; dim3 block(3); dim3 grid((nElem + block.x) / block.x); // round it to the correct size printf("no. elements: %d\n", nElem); printf("no. blocks: (%d,%d,%d)\n", grid.x, grid.y, grid.z); printf("no. threads: (%d,%d,%d)\n", block.x, block.y, block.z); hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block) , 0, 0, ); hipDeviceReset(); }
4152dc2c72d114cafd4585cb55414062613957da.cu
#include <stdio.h> #include <cuda_runtime.h> __global__ void checkIndex(void){ printf("ThreadIdx: (%d,%d,%d) BlockIdx: (%d,%d,%d) BockDim: (%d,%d,%d) gridDim: (%d,%d,%d)\n", \ threadIdx.x,threadIdx.y,threadIdx.z, blockIdx.x,blockIdx.y,blockIdx.z, blockDim.x,blockDim.y,blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main(int argc, char** argv){ int nElem = 6; dim3 block(3); dim3 grid((nElem + block.x) / block.x); // round it to the correct size printf("no. elements: %d\n", nElem); printf("no. blocks: (%d,%d,%d)\n", grid.x, grid.y, grid.z); printf("no. threads: (%d,%d,%d)\n", block.x, block.y, block.z); checkIndex <<< grid, block >>>(); cudaDeviceReset(); }
89a36a8c7541de50e8bcafa7ea9a955ffd95fe82.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #define BUFFSIZE 4096 #define WINDOW_LENGTH 25 #define AXIS 3 #define Y_INDEX 1 #define Z_INDEX 2 /* Does all the processing on the CUDA device. Was intentionally NOT broken up into multiple functions for performance reasons, however is pretty well commented */ __global__ void cudaMagic( int* mag, int* in, int* means, float* sd, int* max, int* min, int* x, int* y, int* z, int* xcoords, int* ycoords, int* zcoords, int numOfLines, int length ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int count, tempCount, avg = 0, sdx = 0, sdy = 0, sdz = 0; int sumx = 0, sumy = 0, sumz = 0, absumx = 0, absumy = 0, absumz = 0; int xmax = 0, ymax = 0, zmax =0, xmin = 0, ymin = 0, zmin = 0; /*makes a flat arrays of all windows */ if( i >= WINDOW_LENGTH && i <= numOfLines ){ for( count = i - WINDOW_LENGTH, tempCount = 0 ; count < i ; count++, tempCount++ ){ x[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = xcoords[count]; y[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = ycoords[count]; z[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = zcoords[count]; } } __syncthreads(); if( i < length ){ /* Initialize the max and min values to the first value */ xmax = x[i*WINDOW_LENGTH]; ymax = y[i*WINDOW_LENGTH]; zmax = z[i*WINDOW_LENGTH]; xmin = x[i*WINDOW_LENGTH]; ymin = y[i*WINDOW_LENGTH]; zmin = z[i*WINDOW_LENGTH]; for( count = i ; count < i + WINDOW_LENGTH ; count++ ){ /* Calculates the sum of the absolute values for the window */ absumx += fabsf(x[i * WINDOW_LENGTH + count]); absumy += fabsf(y[i * WINDOW_LENGTH + count]); absumz += fabsf(z[i * WINDOW_LENGTH + count]); /* Calculates the sums for the window */ sumx += x[i * WINDOW_LENGTH + count]; sumy += y[i * WINDOW_LENGTH + count]; sumz += z[i * WINDOW_LENGTH + count]; /* Calculates the average of the entire window */ avg += (x[i * WINDOW_LENGTH + count] + y[i * WINDOW_LENGTH + 
count] + z[i * WINDOW_LENGTH + count] ); /* Obtains the max coordinates for the window */ xmax = fmaxf( x[i * WINDOW_LENGTH + count], xmax ); ymax = fmaxf( y[i * WINDOW_LENGTH + count], ymax ); zmax = fmaxf( z[i * WINDOW_LENGTH + count], zmax ); /* Obtains the min coordinates for the window */ xmin = fminf( x[i * WINDOW_LENGTH + count], xmin ); ymin = fminf( y[i * WINDOW_LENGTH + count], ymin ); zmin = fminf( z[i * WINDOW_LENGTH + count], zmin ); } __syncthreads(); /* Extra loop to calculate standard deviation because it relies on results of sumx, sumy, and sumz */ for( count = 0 ; count < WINDOW_LENGTH ; count++ ){ sdx += powf( (x[i * WINDOW_LENGTH + count] - (sumx/WINDOW_LENGTH)), 2 ); sdy += powf( (y[i * WINDOW_LENGTH + count] - (sumy/WINDOW_LENGTH)), 2 ); sdz += powf( (z[i * WINDOW_LENGTH + count] - (sumz/WINDOW_LENGTH)), 2 ); } /* Writes all the results to their appropriate arrays */ mag[i] = (absumx + absumy + absumz) / WINDOW_LENGTH; in[i] = avg / WINDOW_LENGTH; means[i] = sumx / WINDOW_LENGTH; means[Y_INDEX * length + i] = sumy / WINDOW_LENGTH; means[Z_INDEX * length + i] = sumz / WINDOW_LENGTH; sd[i] = sqrtf( sdx ); sd[Y_INDEX * length + i] = sqrtf( sdy ); sd[Z_INDEX * length + i] = sqrtf( sdz ); max[i] = xmax; max[Y_INDEX * length + i] = ymax; max[Z_INDEX * length + i] = zmax; min[i] = xmin; min[Y_INDEX * length + i] = ymin; min[Z_INDEX * length + i] = zmin; } } /* Allocates device arrays with error checking (INT) */ __host__ void allocate_dev( int** array, const int size ) { hipError_t err = hipSuccess; err = hipMalloc((void **)array, size ); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate array on device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } /* Allocates device arrays with error checking (FLOAT */ __host__ void allocate_devf( float** array, const int size ) { hipError_t err = hipSuccess; err = hipMalloc((void **)array, size ); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate array on device (error 
code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } /* Allocates the local arrays to copy the result arrays from the device to */ __host__ int allocateLocal( int** mag, int** in, int** means, float** sd, int** max, int** min, const int length ) { /* Allocate local result array (mag) */ (*mag) = (int*)malloc( length * sizeof( int ) ); (*in) = (int*)malloc( length * sizeof( int ) ); (*means) = (int*)malloc( length * AXIS * sizeof( int ) ); (*sd) = (float*)malloc( length * AXIS * sizeof( float ) ); (*max) = (int*)malloc( length * AXIS * sizeof( int ) ); (*min) = (int*)malloc( length * AXIS * sizeof( int ) ); if( *mag == NULL || *in == NULL || *means == NULL || *sd == NULL || *max == NULL || *min == NULL ){ fprintf( stderr, "Malloc of local array failed!\n" ); exit( EXIT_FAILURE ); } return 0; } /* Returns the number of lines there are in the file pointed to by the argument fp */ __host__ int getLineCount( FILE** const fp ) { int count = 0; char c; /* Get the number of lines in the file */ for( c = getc( *fp ) ; c != EOF ; c = getc( *fp ) ){ if( c == '\n' ){ count++; } } rewind( *fp ); return count; } /*populates the local arrays with the data */ __host__ int readData( int** x, int** y, int** z, int* numOfLines ) { char* token; char line[BUFFSIZE]; int count; const char del[2] = ","; FILE* input; /* Open file for reading */ if( ( input = fopen( "sheep_imu_data.csv", "r" ) ) == NULL ){ fprintf( stderr, "Failed to open file!" 
); return 1; } *numOfLines = getLineCount( &input ); *x = (int*)malloc( (*numOfLines) * sizeof(int) ); *y = (int*)malloc( (*numOfLines) * sizeof(int) ); *z = (int*)malloc( (*numOfLines) * sizeof(int) ); if( *x == NULL || *y == NULL || *z == NULL ){ fprintf( stderr, "Malloc of local array failed!\n" ); exit( EXIT_FAILURE ); } count = 0; while( fgets( line, BUFFSIZE, input ) ){ token = strtok( line, del ); (*x)[count] = atoi( token ); token = strtok( NULL, del ); (*y)[count] = atoi( token ); token = strtok( NULL, del ); (*z)[count] = atoi( token ); count++; } fclose( input ); return 0; } /* Writes arrays to a csv file */ __host__ void writeCSV( const char* const filename, int** mag, int** intensity, int** means, float** sd, int** max, int** min, const int length ) { FILE* fp; unsigned int count; if( ( fp = fopen( filename, "w+" ) ) == NULL ){ fprintf( stderr, "Failed to open or create new file!" ); } for( count = 0 ; count < length ; count++ ){ fprintf( fp, "%d, %d, %d, %d, %d, %f, %f, %f, %d, %d, %d, %d, %d, %d\n", (*mag)[count], (*intensity)[count], (*means)[count], (*means)[Y_INDEX*length+count], (*means)[Z_INDEX*length+count], (*sd)[count], (*sd)[Y_INDEX*length+count], (*sd)[Z_INDEX*length+count], (*max)[count], (*max)[Y_INDEX*length+count], (*max)[Z_INDEX*length+count], (*min)[count], (*min)[Y_INDEX*length+count], (*min)[Z_INDEX*length+count] ); } fclose( fp ); }
89a36a8c7541de50e8bcafa7ea9a955ffd95fe82.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #define BUFFSIZE 4096 #define WINDOW_LENGTH 25 #define AXIS 3 #define Y_INDEX 1 #define Z_INDEX 2 /* Does all the processing on the CUDA device. Was intentionally NOT broken up into multiple functions for performance reasons, however is pretty well commented */ __global__ void cudaMagic( int* mag, int* in, int* means, float* sd, int* max, int* min, int* x, int* y, int* z, int* xcoords, int* ycoords, int* zcoords, int numOfLines, int length ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int count, tempCount, avg = 0, sdx = 0, sdy = 0, sdz = 0; int sumx = 0, sumy = 0, sumz = 0, absumx = 0, absumy = 0, absumz = 0; int xmax = 0, ymax = 0, zmax =0, xmin = 0, ymin = 0, zmin = 0; /*makes a flat arrays of all windows */ if( i >= WINDOW_LENGTH && i <= numOfLines ){ for( count = i - WINDOW_LENGTH, tempCount = 0 ; count < i ; count++, tempCount++ ){ x[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = xcoords[count]; y[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = ycoords[count]; z[(i - WINDOW_LENGTH) * WINDOW_LENGTH + tempCount] = zcoords[count]; } } __syncthreads(); if( i < length ){ /* Initialize the max and min values to the first value */ xmax = x[i*WINDOW_LENGTH]; ymax = y[i*WINDOW_LENGTH]; zmax = z[i*WINDOW_LENGTH]; xmin = x[i*WINDOW_LENGTH]; ymin = y[i*WINDOW_LENGTH]; zmin = z[i*WINDOW_LENGTH]; for( count = i ; count < i + WINDOW_LENGTH ; count++ ){ /* Calculates the sum of the absolute values for the window */ absumx += fabsf(x[i * WINDOW_LENGTH + count]); absumy += fabsf(y[i * WINDOW_LENGTH + count]); absumz += fabsf(z[i * WINDOW_LENGTH + count]); /* Calculates the sums for the window */ sumx += x[i * WINDOW_LENGTH + count]; sumy += y[i * WINDOW_LENGTH + count]; sumz += z[i * WINDOW_LENGTH + count]; /* Calculates the average of the entire window */ avg += (x[i * WINDOW_LENGTH + count] + y[i * WINDOW_LENGTH + count] + z[i * WINDOW_LENGTH + count] ); /* Obtains the max 
coordinates for the window */ xmax = fmaxf( x[i * WINDOW_LENGTH + count], xmax ); ymax = fmaxf( y[i * WINDOW_LENGTH + count], ymax ); zmax = fmaxf( z[i * WINDOW_LENGTH + count], zmax ); /* Obtains the min coordinates for the window */ xmin = fminf( x[i * WINDOW_LENGTH + count], xmin ); ymin = fminf( y[i * WINDOW_LENGTH + count], ymin ); zmin = fminf( z[i * WINDOW_LENGTH + count], zmin ); } __syncthreads(); /* Extra loop to calculate standard deviation because it relies on results of sumx, sumy, and sumz */ for( count = 0 ; count < WINDOW_LENGTH ; count++ ){ sdx += powf( (x[i * WINDOW_LENGTH + count] - (sumx/WINDOW_LENGTH)), 2 ); sdy += powf( (y[i * WINDOW_LENGTH + count] - (sumy/WINDOW_LENGTH)), 2 ); sdz += powf( (z[i * WINDOW_LENGTH + count] - (sumz/WINDOW_LENGTH)), 2 ); } /* Writes all the results to their appropriate arrays */ mag[i] = (absumx + absumy + absumz) / WINDOW_LENGTH; in[i] = avg / WINDOW_LENGTH; means[i] = sumx / WINDOW_LENGTH; means[Y_INDEX * length + i] = sumy / WINDOW_LENGTH; means[Z_INDEX * length + i] = sumz / WINDOW_LENGTH; sd[i] = sqrtf( sdx ); sd[Y_INDEX * length + i] = sqrtf( sdy ); sd[Z_INDEX * length + i] = sqrtf( sdz ); max[i] = xmax; max[Y_INDEX * length + i] = ymax; max[Z_INDEX * length + i] = zmax; min[i] = xmin; min[Y_INDEX * length + i] = ymin; min[Z_INDEX * length + i] = zmin; } } /* Allocates device arrays with error checking (INT) */ __host__ void allocate_dev( int** array, const int size ) { cudaError_t err = cudaSuccess; err = cudaMalloc((void **)array, size ); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate array on device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /* Allocates device arrays with error checking (FLOAT */ __host__ void allocate_devf( float** array, const int size ) { cudaError_t err = cudaSuccess; err = cudaMalloc((void **)array, size ); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate array on device (error code %s)!\n", cudaGetErrorString(err)); 
exit(EXIT_FAILURE); } } /* Allocates the local arrays to copy the result arrays from the device to */ __host__ int allocateLocal( int** mag, int** in, int** means, float** sd, int** max, int** min, const int length ) { /* Allocate local result array (mag) */ (*mag) = (int*)malloc( length * sizeof( int ) ); (*in) = (int*)malloc( length * sizeof( int ) ); (*means) = (int*)malloc( length * AXIS * sizeof( int ) ); (*sd) = (float*)malloc( length * AXIS * sizeof( float ) ); (*max) = (int*)malloc( length * AXIS * sizeof( int ) ); (*min) = (int*)malloc( length * AXIS * sizeof( int ) ); if( *mag == NULL || *in == NULL || *means == NULL || *sd == NULL || *max == NULL || *min == NULL ){ fprintf( stderr, "Malloc of local array failed!\n" ); exit( EXIT_FAILURE ); } return 0; } /* Returns the number of lines there are in the file pointed to by the argument fp */ __host__ int getLineCount( FILE** const fp ) { int count = 0; char c; /* Get the number of lines in the file */ for( c = getc( *fp ) ; c != EOF ; c = getc( *fp ) ){ if( c == '\n' ){ count++; } } rewind( *fp ); return count; } /*populates the local arrays with the data */ __host__ int readData( int** x, int** y, int** z, int* numOfLines ) { char* token; char line[BUFFSIZE]; int count; const char del[2] = ","; FILE* input; /* Open file for reading */ if( ( input = fopen( "sheep_imu_data.csv", "r" ) ) == NULL ){ fprintf( stderr, "Failed to open file!" 
); return 1; } *numOfLines = getLineCount( &input ); *x = (int*)malloc( (*numOfLines) * sizeof(int) ); *y = (int*)malloc( (*numOfLines) * sizeof(int) ); *z = (int*)malloc( (*numOfLines) * sizeof(int) ); if( *x == NULL || *y == NULL || *z == NULL ){ fprintf( stderr, "Malloc of local array failed!\n" ); exit( EXIT_FAILURE ); } count = 0; while( fgets( line, BUFFSIZE, input ) ){ token = strtok( line, del ); (*x)[count] = atoi( token ); token = strtok( NULL, del ); (*y)[count] = atoi( token ); token = strtok( NULL, del ); (*z)[count] = atoi( token ); count++; } fclose( input ); return 0; } /* Writes arrays to a csv file */ __host__ void writeCSV( const char* const filename, int** mag, int** intensity, int** means, float** sd, int** max, int** min, const int length ) { FILE* fp; unsigned int count; if( ( fp = fopen( filename, "w+" ) ) == NULL ){ fprintf( stderr, "Failed to open or create new file!" ); } for( count = 0 ; count < length ; count++ ){ fprintf( fp, "%d, %d, %d, %d, %d, %f, %f, %f, %d, %d, %d, %d, %d, %d\n", (*mag)[count], (*intensity)[count], (*means)[count], (*means)[Y_INDEX*length+count], (*means)[Z_INDEX*length+count], (*sd)[count], (*sd)[Y_INDEX*length+count], (*sd)[Z_INDEX*length+count], (*max)[count], (*max)[Y_INDEX*length+count], (*max)[Z_INDEX*length+count], (*min)[count], (*min)[Y_INDEX*length+count], (*min)[Z_INDEX*length+count] ); } fclose( fp ); }
197b3d05a0364a9de46fc3f9423281637f404deb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/channel_stats_op.h" namespace caffe2 { namespace { // based on "Optimizing Parallel Reduction in CUDA" by Mark Harris // note - volatile keyword is needed to allow doing a warp reduction without // synchronization on recent architectures template <unsigned int blockSize> __device__ void warpReduce(volatile float* sdata, unsigned int tid) { // note - the if statements are "free" as they are resolved at compile time if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void ChannelStatsBlockKernel( int N, int C, int valsPerChannel, const float* inputData, float* sums, float* sumsq) { __shared__ float sumData[blockSize]; __shared__ float sumSqData[blockSize]; auto tid = threadIdx.x; auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize; auto localBlockIndex = blockIdx.x % numBlocksPerChannel; auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel + localBlockIndex * blockSize + tid; sumData[tid] = 0; sumSqData[tid] = 0; if (localBlockIndex * blockSize + tid < valsPerChannel) { sumData[tid] += inputData[inputIndex]; sumSqData[tid] += inputData[inputIndex] * inputData[inputIndex]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sumData[tid] += sumData[tid + 256]; sumSqData[tid] += sumSqData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sumData[tid] += sumData[tid + 128]; sumSqData[tid] += sumSqData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sumData[tid] += sumData[tid + 64]; sumSqData[tid] += sumSqData[tid + 64]; } 
__syncthreads(); } if (tid < 32) { warpReduce<blockSize>(sumData, tid); warpReduce<blockSize>(sumSqData, tid); } // output block data sorted by C to simplify second reduction if (tid == 0) { auto n = blockIdx.x / numBlocksPerChannel / C; auto c = (blockIdx.x / numBlocksPerChannel) % C; auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex; sums[outputIndex] = sumData[0]; sumsq[outputIndex] = sumSqData[0]; } } template <unsigned int blockSize> __global__ void ChannelStatsFinalSumsKernel( int N, int C, int numSumsPerChannel, const float* sumsScratch, const float* sumsqScratch, float* channelSums, float* channelSumsq) { __shared__ float sumData[blockSize]; __shared__ float sumSqData[blockSize]; auto tid = threadIdx.x; auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid; sumData[tid] = 0; sumSqData[tid] = 0; for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel; i += blockSize) { sumData[tid] += sumsScratch[i]; sumSqData[tid] += sumsqScratch[i]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sumData[tid] += sumData[tid + 256]; sumSqData[tid] += sumSqData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sumData[tid] += sumData[tid + 128]; sumSqData[tid] += sumSqData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sumData[tid] += sumData[tid + 64]; sumSqData[tid] += sumSqData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(sumData, tid); warpReduce<blockSize>(sumSqData, tid); } if (tid == 0) { channelSums[blockIdx.x] = sumData[0]; channelSumsq[blockIdx.x] = sumSqData[0]; } } } // namespace template <> bool ChannelStatsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(INPUT); CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.ndim() > 3 ? X.dim32(3) : 1; const int D = X.ndim() > 4 ? 
X.dim32(4) : 1; auto sum = Output(SUM); auto sumsq = Output(SUMSQ); const auto X_arr = X.data<float>(); const auto valsPerChannel = H * W * D; const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel); const auto numBlocksTotal = numBlocksPerChannel * N * C; sumScratch_.Resize(numBlocksTotal); sumsqScratch_.Resize(numBlocksTotal); sum->Resize(C); sumsq->Resize(C); hipLaunchKernelGGL(( ChannelStatsBlockKernel<CAFFE_CUDA_NUM_THREADS>) , dim3(numBlocksTotal), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, valsPerChannel, X_arr, sumScratch_.mutable_data<float>(), sumsqScratch_.mutable_data<float>()); hipLaunchKernelGGL(( ChannelStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS>) , dim3(C), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, numBlocksPerChannel, sumScratch_.data<float>(), sumsqScratch_.data<float>(), sum->template mutable_data<float>(), sumsq->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ChannelStats, ChannelStatsOp<CUDAContext>); } // namespace caffe2
197b3d05a0364a9de46fc3f9423281637f404deb.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/channel_stats_op.h" namespace caffe2 { namespace { // based on "Optimizing Parallel Reduction in CUDA" by Mark Harris // note - volatile keyword is needed to allow doing a warp reduction without // synchronization on recent architectures template <unsigned int blockSize> __device__ void warpReduce(volatile float* sdata, unsigned int tid) { // note - the if statements are "free" as they are resolved at compile time if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void ChannelStatsBlockKernel( int N, int C, int valsPerChannel, const float* inputData, float* sums, float* sumsq) { __shared__ float sumData[blockSize]; __shared__ float sumSqData[blockSize]; auto tid = threadIdx.x; auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize; auto localBlockIndex = blockIdx.x % numBlocksPerChannel; auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel + localBlockIndex * blockSize + tid; sumData[tid] = 0; sumSqData[tid] = 0; if (localBlockIndex * blockSize + tid < valsPerChannel) { sumData[tid] += inputData[inputIndex]; sumSqData[tid] += inputData[inputIndex] * inputData[inputIndex]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sumData[tid] += sumData[tid + 256]; sumSqData[tid] += sumSqData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sumData[tid] += sumData[tid + 128]; sumSqData[tid] += sumSqData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sumData[tid] += sumData[tid + 64]; sumSqData[tid] += sumSqData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(sumData, tid); 
warpReduce<blockSize>(sumSqData, tid); } // output block data sorted by C to simplify second reduction if (tid == 0) { auto n = blockIdx.x / numBlocksPerChannel / C; auto c = (blockIdx.x / numBlocksPerChannel) % C; auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex; sums[outputIndex] = sumData[0]; sumsq[outputIndex] = sumSqData[0]; } } template <unsigned int blockSize> __global__ void ChannelStatsFinalSumsKernel( int N, int C, int numSumsPerChannel, const float* sumsScratch, const float* sumsqScratch, float* channelSums, float* channelSumsq) { __shared__ float sumData[blockSize]; __shared__ float sumSqData[blockSize]; auto tid = threadIdx.x; auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid; sumData[tid] = 0; sumSqData[tid] = 0; for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel; i += blockSize) { sumData[tid] += sumsScratch[i]; sumSqData[tid] += sumsqScratch[i]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sumData[tid] += sumData[tid + 256]; sumSqData[tid] += sumSqData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sumData[tid] += sumData[tid + 128]; sumSqData[tid] += sumSqData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sumData[tid] += sumData[tid + 64]; sumSqData[tid] += sumSqData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(sumData, tid); warpReduce<blockSize>(sumSqData, tid); } if (tid == 0) { channelSums[blockIdx.x] = sumData[0]; channelSumsq[blockIdx.x] = sumSqData[0]; } } } // namespace template <> bool ChannelStatsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(INPUT); CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.ndim() > 3 ? X.dim32(3) : 1; const int D = X.ndim() > 4 ? 
X.dim32(4) : 1; auto sum = Output(SUM); auto sumsq = Output(SUMSQ); const auto X_arr = X.data<float>(); const auto valsPerChannel = H * W * D; const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel); const auto numBlocksTotal = numBlocksPerChannel * N * C; sumScratch_.Resize(numBlocksTotal); sumsqScratch_.Resize(numBlocksTotal); sum->Resize(C); sumsq->Resize(C); ChannelStatsBlockKernel<CAFFE_CUDA_NUM_THREADS> <<<numBlocksTotal, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, valsPerChannel, X_arr, sumScratch_.mutable_data<float>(), sumsqScratch_.mutable_data<float>()); ChannelStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS> <<<C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, numBlocksPerChannel, sumScratch_.data<float>(), sumsqScratch_.data<float>(), sum->template mutable_data<float>(), sumsq->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(ChannelStats, ChannelStatsOp<CUDAContext>); } // namespace caffe2
63dcd80937db97b7d21351eaee7a9f9a4161d58d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * FPFHEstimator2.cpp * * Created on: Sep 25, 2012 * Author: avo */ #include "DFPFHEstimator.h" #include <helper_cuda.h> #include <helper_image.h> #include <thrust/remove.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "point_info.hpp" #include "../include/processor.h" #include "../sink/pcd_io.h" #include "utils.hpp" #include "device_utils.hpp" namespace device { struct DFPFHBaseKernel : public FeatureBaseKernel { enum { rx = 5, ry = rx, lx = 2*rx+1, ly = 2*ry+1, }; }; struct SDPFHEstimator : public DFPFHBaseKernel { enum { points_per_block = 32, dx = points_per_block * WARP_SIZE, dy = 1, rx = 10, ry = rx, lx = 2*rx+1, ly = 2*ry+1, }; float4 *input_pos; float4 *input_normals; float *output_bins; unsigned int view; float radius; float invalidToNormalPointRatio; __device__ __forceinline__ void operator () () const { __shared__ float4 shm_pos[points_per_block]; __shared__ float4 shm_normal[points_per_block]; __shared__ float shm_histo[points_per_block*bins]; __shared__ unsigned int shm_off[points_per_block*2]; __shared__ unsigned int shm_buffer[dx*dy]; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int wid = tid / WARP_SIZE; unsigned int wtid = tid - wid * WARP_SIZE; // unsigned int oy = (blockIdx.x * points_per_block + wid)/640; // unsigned int ox = (blockIdx.x * points_per_block + wid) - oy*640; int ox,oy; if(tid<points_per_block) { shm_off[tid] = oy = (blockIdx.x * points_per_block + tid)/640; shm_off[points_per_block+tid] = ox = (blockIdx.x * points_per_block + tid) - oy*640; shm_pos[tid] = input_pos[blockIdx.z*640*480+oy*640+ox]; shm_normal[tid] = input_normals[blockIdx.z*640*480+oy*640+ox]; } for(int i=tid;i<bins*points_per_block;i+=dx) { shm_histo[i] = 0.f; } __syncthreads(); if(shm_pos[wid].z==0 || 
isBackground(shm_pos[wid].w)) { for(int i=wtid;i<bins;i+=WARP_SIZE) { // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+i] = -1; output_bins[640*480*blockIdx.z*bins_n_meta + i*640*480 + blockIdx.x*points_per_block + wid] = 0; } if(wtid==0) { // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+bins] = 0; output_bins[640*480*blockIdx.z*bins_n_meta + 640*480*bins + blockIdx.x*points_per_block + wid] = 0; } return; } // for(int i=wtid;i<bins;i+=WARP_SIZE) // { // shm_histo[] // } // ox = shm_off[wid]; // oy = shm_off[points_per_block+wid]; __syncthreads(); unsigned int point_count = 0; unsigned int invalid_points = 0; for(int j=wtid;j<lx*ly;j+=WARP_SIZE) { __syncthreads(); oy = j/ly; ox = j - oy*ly; oy = shm_off[wid] - ry + oy; ox = shm_off[points_per_block+wid] - rx + ox; if(oy<0 || oy>=480 || ox <0 || ox>=640 || j==(lx*ly)/2) continue; float4 p = input_pos[blockIdx.z*640*480+oy*640+ox]; float4 n = input_normals[blockIdx.z*640*480+oy*640+ox]; if(!isValid(p.w)) { invalid_points++; continue; } if(lengthf43(minusf43(shm_pos[wid],p))>radius || n.w<0) { continue; } float3 ps,pt,ns,nt; if( dotf43(shm_normal[wid], minusf43(p,shm_pos[wid]) ) <= dotf43(n, minusf43(shm_pos[wid],p) ) ) { ps = fetch(shm_pos[wid]); ns = fetch(shm_normal[wid]); pt = fetch(p); nt = fetch(n); } else { ps = fetch(p); ns = fetch(n); pt = fetch(shm_pos[wid]); nt = fetch(shm_normal[wid]); } // float3 u = ns; float3 d = pt-ps; float3 v = cross(d,ns); // float3 w = cross(ns,v); float vn = norm(v); if(vn==0.f || norm(d)==0.f) { invalid_points++; continue; } point_count++; vn = 1.f/vn; v = v * vn; unsigned int idx = 0; float f = dot(v,nt); idx = bins_per_feature * ((f + 1.0f) * 0.5f); idx = min(idx,bins_per_feature-1) + 0*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); f = dot(ns,d)/norm(d); idx = bins_per_feature * ((f + 1.0f) * 0.5f); idx = min(idx,bins_per_feature-1) + 1*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); // 
v <- w v = cross(ns,v); f = atan2f(dot(v,nt),dot(ns,nt)); idx = bins_per_feature * ((f + M_PI) * (1.0f / (2.0f * M_PI))); idx = min(idx,bins_per_feature-1) + 2*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); } __syncthreads(); // shm_buffer[wid*WARP_SIZE+wtid] = point_count; volatile unsigned int *sbuf = &shm_buffer[wid*WARP_SIZE]; sbuf[wtid] = point_count; if(wtid < 16) { sbuf[wtid] += sbuf[wtid + 16]; sbuf[wtid] += sbuf[wtid + 8]; sbuf[wtid] += sbuf[wtid + 4]; sbuf[wtid] += sbuf[wtid + 2]; sbuf[wtid] += sbuf[wtid + 1]; } __syncthreads(); point_count = shm_buffer[wid*WARP_SIZE]; sbuf[wtid] = invalid_points; if(wtid < 16) { sbuf[wtid] += sbuf[wtid + 16]; sbuf[wtid] += sbuf[wtid + 8]; sbuf[wtid] += sbuf[wtid + 4]; sbuf[wtid] += sbuf[wtid + 2]; sbuf[wtid] += sbuf[wtid + 1]; } __syncthreads(); invalid_points = shm_buffer[wid*WARP_SIZE]; // if((((float)invalid_points)/((float)point_count)) > 0.5f) // { // printf("(%d/%d) %d / %d = %f \n",wid,wtid,point_count,invalid_points,(((float)invalid_points)/((float)point_count))); // } __syncthreads(); bool vali = ( point_count > 0 && (((float)invalid_points)/((float)point_count)) < invalidToNormalPointRatio); for(int i=wtid;i<bins;i+=WARP_SIZE) { float re = (vali)?shm_histo[wid*bins+i]/point_count:0; // output_bins[(view*640*480+blockIdx.x*points_per_block+wid)*bins+i] = re; // output_bins[640*480*view*bins+i*640*480+blockDim.x*points_per_block + blockIdx.x*points_per_block + wid] = re; output_bins[640*480*blockIdx.z*bins_n_meta + i*640*480 + blockIdx.x*points_per_block + wid] = re; // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+i] = re; } if(wtid==0) { output_bins[640*480*blockIdx.z*bins_n_meta + bins*640*480 + blockIdx.x*points_per_block + wid] = vali; // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+bins] = vali; } } }; __global__ void computeSDPFH(const SDPFHEstimator sdpfhe){ sdpfhe(); } struct DFPFHEstimator : public DFPFHBaseKernel { enum { 
points_per_block = 32, dx = points_per_block * WARP_SIZE, dy = 1, }; float4 *input_pos; // float4 *input_normals; float *input_bins_sdfpfh; float *output_bins; float radius; int maxReconstructuionLevel; template<typename T> __device__ __forceinline__ void sum(volatile T* smem_buffer,int wtid) const { // T reg = smem_buffer[wtid]; if(wtid<16) { smem_buffer[wtid] += smem_buffer[wtid + 16]; smem_buffer[wtid] += smem_buffer[wtid + 8]; smem_buffer[wtid] += smem_buffer[wtid + 4]; smem_buffer[wtid] += smem_buffer[wtid + 2]; smem_buffer[wtid] += smem_buffer[wtid + 1]; } } __device__ __forceinline__ void operator () () const { __shared__ float4 shm_pos[points_per_block]; // __shared__ float4 shm_normal[points_per_block]; __shared__ float shm_histo[points_per_block*bins_n_meta]; __shared__ unsigned int shm_off[points_per_block*2]; __shared__ float shm_hist_buffer[dx*dy]; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int wid = tid / WARP_SIZE; unsigned int wtid = tid - wid * WARP_SIZE; int ox,oy; if(tid<points_per_block) { shm_off[tid] = oy = (blockIdx.x * points_per_block + tid)/640; shm_off[points_per_block+tid] = ox = (blockIdx.x * points_per_block + tid) - oy*640; shm_pos[tid] = input_pos[blockIdx.z*640*480+oy*640+ox]; } for(int i=tid;i<bins_n_meta*points_per_block;i+=dx) { shm_histo[i] = 0.f; } __syncthreads(); // if(tid==1 && blockIdx.x > 4110 && blockIdx.x < 4115 && blockIdx.z==0) // { // printf("%d/%d -> %d/%d -> %f %f %f %f -> %d %d (%d>%d)->%d %d \n",blockIdx.x,tid,shm_off[tid],shm_off[tid+points_per_block],shm_pos[tid].x,shm_pos[tid].y,shm_pos[tid].z,shm_pos[tid].w,shm_pos[wid].z==0, input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + 640*480*bins + blockIdx.x*points_per_block + wid]==0 , getReconstructionLevel(shm_pos[wid].w),maxReconstructuionLevel , getReconstructionLevel(shm_pos[wid].w)>maxReconstructuionLevel, !isForeground(shm_pos[wid].w) ); // // } if(!(shm_pos[wid].z==0 || input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + 640*480*bins 
+ blockIdx.x*points_per_block + wid]==0 || getReconstructionLevel(shm_pos[wid].w)>maxReconstructuionLevel || !isForeground(shm_pos[wid].w))) { // unsigned int point_count = 0; // unsigned int invalid_points = 0; unsigned int warpRuns = (lx*ly-1)/WARP_SIZE +1; // if(blockIdx.z==0 && blockIdx.x==0 && tid==0) // printf("warpRuns: %d | lx*ly: %d \n",warpRuns,lx*ly); for(int r=0;r<warpRuns;r++) { unsigned int j = r*WARP_SIZE+wtid; // __syncthreads(); oy = j/ly; ox = j - oy*ly; oy = shm_off[wid] - ry + oy; ox = shm_off[points_per_block+wid] - rx + ox; bool load = false; float weight = 1.f; if(!(oy<0 || oy>=480 || ox <0 || ox>=640 || j==(lx*ly)/2 || j>=lx*ly)) { float4 p = input_pos[blockIdx.z*640*480+oy*640+ox]; // if(!isValid(p.w)) // { // invalid_points++; // continue; // } float dis; if((dis=lengthf43(minusf43(shm_pos[wid],p)))<=radius) { load = true; weight = 1.f/(dis/1000.f); } } //put in warp buffer and reduce for(int b=0;b<bins_n_meta;b++) { // shm_hist_buffer[wid*WARP_SIZE+j] = (load)?input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + b*640*480 + oy*640+ox]:0.f; shm_hist_buffer[wid*WARP_SIZE+wtid] = (load)?weight*input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + b*640*480 + oy*640+ox]:0.f; volatile float *smem_buffer = &shm_hist_buffer[wid*WARP_SIZE]; sum(smem_buffer,wtid); if(wtid==0) { // if(blockIdx.z==0 && blockIdx.x==0 && tid==0) // printf("shm_hist[0]: %f | smem_buffer[0]: %f \n",shm_histo[b*points_per_block+wid],smem_buffer[wtid]); shm_histo[b*points_per_block+wid] += smem_buffer[wtid]; } } } } __syncthreads(); for(int i=tid;i<points_per_block*bins_n_meta;i+=dx*dy) { unsigned int oid = i/points_per_block; unsigned int otid = i - oid*points_per_block; // if(oid<bins) output_bins[blockIdx.z*640*480*bins_n_meta+oid*640*480+blockIdx.x*points_per_block+otid] = (shm_histo[points_per_block*bins+otid]>0.f)?shm_histo[i]/shm_histo[points_per_block*bins+otid]:0.f; // else // output_bins[blockIdx.z*640*480*bins_n_meta+oid*640*480+blockIdx.x*points_per_block+otid] = 
shm_histo[i]; } } }; __global__ void computeDFPFH(const DFPFHEstimator dfpfhe){ dfpfhe(); } struct MeanDFPFHBlockEstimator : public DFPFHBaseKernel { enum { dx = 1024, dy = 1, max_shared = 11*1024, // 20kb shared_lines = max_shared/dx, shared_buffer = shared_lines * dx }; float *input_bins; float *output_block_mean; __device__ __forceinline__ void operator () () const { __shared__ float shm_count[dx]; __shared__ float shm_buffer[shared_buffer]; unsigned int tid = threadIdx.x; shm_count[tid] = input_bins[blockIdx.z*640*480*bins_n_meta+bins*640*480+blockIdx.x*dx+tid]; __syncthreads(); reduceBlock<dx>(shm_count); __syncthreads(); if(tid==0) { output_block_mean[blockIdx.z*gridDim.x*bins_n_meta + bins*gridDim.x+blockIdx.x] = shm_count[0];// (shm_count[0]>0)?1:0; } if(shm_count[0]<=0) return; // if(blockIdx.x==0 && tid==0) // { // printf("count %f \n",shm_count[0]); // output_block_mean[0] = shm_count[0]; // } // shm_buffer[threadIdx.x] = 1; // shm_buffer[dx+threadIdx.x] = 2; unsigned int loops = (bins-1)/shared_lines +1; for(int l=0;l<loops;l++) { unsigned int lines = ((l+1)*shared_lines<bins)?shared_lines:bins-l*shared_lines; for(int i=0;i<lines;i++) { shm_buffer[i*dx+tid] = input_bins[blockIdx.z*640*480*bins_n_meta+(l*shared_lines+i)*640*480+blockIdx.x*dx+tid]; } __syncthreads(); reduceBlockNoReg<dx>(shm_buffer,lines); __syncthreads(); if(tid<lines) output_block_mean[blockIdx.z*gridDim.x*bins_n_meta+(l*shared_lines+tid)*gridDim.x+blockIdx.x] = shm_buffer[tid*dx];///shm_count[0]; // __syncthreads(); } // } // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlock(T* shm) const // { // // unsigned int tid = threadIdx.x; // float sum = shm[tid]; // if (blockSize >= 1024) { if(tid < 512) { shm[tid] = sum = sum + shm[tid + 512]; } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { shm[tid] = sum = sum + shm[tid + 256]; } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { shm[tid] = sum = sum + shm[tid + 128]; } 
__syncthreads(); } // if (blockSize >= 128) { if(tid < 64) { shm[tid] = sum = sum + shm[tid + 64]; } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { smem[tid] = sum = sum + smem[tid + 32]; } // if (blockSize >= 32) { smem[tid] = sum = sum + smem[tid + 16]; } // if (blockSize >= 16) { smem[tid] = sum = sum + smem[tid + 8]; } // if (blockSize >= 8) { smem[tid] = sum = sum + smem[tid + 4]; } // if (blockSize >= 4) { smem[tid] = sum = sum + smem[tid + 2]; } // if (blockSize >= 2) { smem[tid] = sum = sum + smem[tid + 1]; } // } // } // // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlockNoReg(T* shm) const // { // // unsigned int tid = threadIdx.x; // if (blockSize >= 1024) { if(tid < 512) { shm[tid] += shm[tid + 512]; } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { shm[tid] += shm[tid + 256]; } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { shm[tid] += shm[tid + 128]; } __syncthreads(); } // if (blockSize >= 128) { if(tid < 64) { shm[tid] += shm[tid + 64]; } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } // if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } // if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } // if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } // if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } // if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } // } // } // // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlockNoReg(T* shm,int lines) const // { // // unsigned int tid = threadIdx.x; // if (blockSize >= 1024) { if(tid < 512) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 512]; } } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 256]; } } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { for(int 
i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 128]; } } __syncthreads(); } // if (blockSize >= 128) { if(tid < 64) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 64]; } } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 32]; } } // if (blockSize >= 32) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 16]; } } // if (blockSize >= 16) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 8]; } } // if (blockSize >= 8) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 4]; } } // if (blockSize >= 4) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 2]; } } // if (blockSize >= 2) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 1]; } } // } // } }; __global__ void computeBlockMeanDFPFH(const MeanDFPFHBlockEstimator blockMean){ blockMean(); } template<unsigned int blockSize> struct MeanDFPFHEstimator : public DFPFHBaseKernel { float *input_bins; float *output_block_mean; unsigned int length; enum { dx = blockSize, max_shared = 11*1024, // 11kb shared_lines = max_shared/dx, shared_buffer = shared_lines * dx }; __device__ __forceinline__ void operator () () const { __shared__ float shm_count[dx]; __shared__ float shm_buffer[shared_buffer]; unsigned int tid = threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; // if(tid==0) // printf("block: %d \n",blockIdx.z); // // if(blockIdx.z==1 & tid==0) // { // for(int t=0;t<length;t+=50) // { // printf("inKernelCount(%d): %f \n",t,input_bins[blockIdx.z*length*bins_n_meta+bins*length+t]); // } // } float sum = 0.f; unsigned int i = blockIdx.x*dx*2 + tid; while(i<length) { sum += input_bins[blockIdx.z*length*bins_n_meta+bins*length+i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if ( (i + dx) < length) sum += input_bins[blockIdx.z*length*bins_n_meta+bins*length+i+dx]; i += 
gridSize; } shm_count[tid] = sum; __syncthreads(); reduceBlock<dx>(shm_count); __syncthreads(); if(tid==0) { output_block_mean[blockIdx.z*bins_n_meta+bins] = shm_count[0];// (shm_count[0]>0)?1.f:0.f; // printf("count: %f \n",shm_count[0]); } if(shm_count[0]<=0) return; unsigned int loops = (bins-1)/shared_lines +1; // if(threadIdx.x==0 && blockIdx.x==0) // printf("loops: %d \n",loops); for(int l=0;l<loops;l++) { unsigned int lines = ((l+1)*shared_lines<bins)?shared_lines:bins-l*shared_lines; for(int b=0;b<lines;b++) { sum = 0.f; i = blockIdx.x*dx*2 + threadIdx.x; while(i<length) { sum += input_bins[blockIdx.z*length*bins_n_meta+(l*shared_lines+b)*length+i]; if ( (i + dx) < length) sum += input_bins[blockIdx.z*length*bins_n_meta+(l*shared_lines+b)*length+i+dx]; i += gridSize; } shm_buffer[b*dx+tid] = sum; } __syncthreads(); reduceBlockNoReg<dx>(shm_buffer,lines); __syncthreads(); if(tid<lines) output_block_mean[blockIdx.z*bins_n_meta+(l*shared_lines+tid)] = shm_buffer[tid*dx]/shm_count[0]; } } }; __global__ void computeMeanDFPFH(const MeanDFPFHEstimator<1024> meandfpfhe){ meandfpfhe(); } __global__ void computeMeanDFPFH(const MeanDFPFHEstimator<512> meandfpfhe){ meandfpfhe(); } struct DivDFPFHEstimator: public DFPFHBaseKernel { enum { dx = 1024, }; float *input_dfpfh_bins; float *input_mean_bins; float *output_div; // __device__ __forceinline__ float // klDivergence(float *feature, float *mean, unsigned int bins_count, unsigned int offset_feature, unsigned int offset_mean) const // { // // float div = 0.f; // for(int i=0;i<bins_count;i++) // { // float p = feature[i*offset_feature]; // float m = mean[i*offset_mean]; // // if(p/m>0) // div += (p - m) * __logf(p/m); // } // // return div; // } // // __device__ __forceinline__ float // klEuclideanDivergence(float *feature, float *mean, unsigned int feature_count, unsigned int bin_count_per_feature, unsigned int offset_feature, unsigned int offset_mean) const // { // float div = 0.f; // // for(int 
f=0;f<feature_count;f++) // { // float tmpDiv = 0.f; // for(int i=0;i<bin_count_per_feature;i++) // { // float p = feature[(f*bin_count_per_feature+i)*offset_feature]; // float m = mean[(f*bin_count_per_feature+i)*offset_mean]; // // if(p/m>0) // tmpDiv += (p - m) * __logf(p/m); // } // div += (tmpDiv * tmpDiv); // } // // return sqrtf(div); // } __device__ __forceinline__ void operator () () const { __shared__ float shm_mean[bins]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx+tid; // unsigned int off = gridDim.x; if(tid<bins) shm_mean[tid] = input_mean_bins[blockIdx.z*bins_n_meta+tid]; __syncthreads(); // float div = klDivergence(&input_dfpfh_bins[blockIdx.z*640*480*bins_n_meta+i],shm_mean,bins,640*480,1); float div = klEuclideanDivergence(&input_dfpfh_bins[blockIdx.z*640*480*bins_n_meta+i],shm_mean,features,bins_per_feature,640*480,1); output_div[blockIdx.z*640*480+i] = div; } }; __global__ void computeDivDFPFH(const DivDFPFHEstimator de){ de(); } struct SigmaDFPFHBlock : public DFPFHBaseKernel { enum { dx = 1024, }; float *input_div; float *output_div_block; unsigned int length; __device__ __forceinline__ void operator () () const { __shared__ float shm_buffer[dx]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx*2 + threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; float sum = 0.f; // float count = 0.f; while(i<length) { float tmp = input_div[blockIdx.z*640*480+i]; sum += tmp*tmp; if ((i+dx)<length) { tmp = input_div[blockIdx.z*640*480+i+dx]; sum += tmp * tmp; } i += gridSize; } shm_buffer[tid] = sum; __syncthreads(); reduceBlock<dx>(shm_buffer); __syncthreads(); if(tid==0) output_div_block[blockIdx.z*gridDim.x+blockIdx.x] = shm_buffer[0]; // if(tid==0) // printf("sigBlock: %d -> %f \n",blockIdx.x,shm_buffer[0]); } }; __global__ void computeSigmaDFPFHBlock(const SigmaDFPFHBlock sb){ sb(); } template<unsigned int blockSize> struct SigmaDFPFH : public DFPFHBaseKernel { enum { dx = blockSize, }; float *input_sig_block; float 
*input_mean_data; float *output_sigmas; unsigned int length; __device__ __forceinline__ void operator () () const { __shared__ float shm_buffer[dx]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx*2 + threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; float sum = 0.f; while(i<length) { sum += input_sig_block[blockIdx.z*length+i]; if ((i+dx)<length) sum += input_sig_block[blockIdx.z*length+i+dx]; i += gridSize; } shm_buffer[tid] = sum; reduceBlock<dx>(shm_buffer); __syncthreads(); if(tid==0) { float points = input_mean_data[blockIdx.z*bins_n_meta+bins]; // printf("(%d) points: %f sigma: %f \n",blockIdx.z,points,sqrtf(shm_buffer[0]/points)); output_sigmas[blockIdx.z] = (points>0)?sqrtf(shm_buffer[0]/points):-1.f; } } }; __global__ void computeSigmaDFPFH(const SigmaDFPFH<512> sig){ sig(); } struct PersistanceDFPFHEstimator : public DFPFHBaseKernel { enum { dx = 1024, }; float beta; bool intersection; float *input_div1; float *input_div2; float *input_sigma1; float *input_sigma2; float *input_bins_n_meta1; float *input_bins_n_meta2; int *persistence_map; __device__ __forceinline__ void operator () () const { unsigned int tid = threadIdx.x; unsigned int vid = blockIdx.y*blockDim.x*blockDim.y+blockIdx.x*blockDim.x+tid; unsigned int gid = blockIdx.z*640*480+vid; int idx = -1; if( input_bins_n_meta1[blockIdx.z*640*480*bins_n_meta+bins*640*480+vid]>0 && input_bins_n_meta2[blockIdx.z*640*480*bins_n_meta+bins*640*480+vid]>0 ) { if( (input_div1[gid] > beta * input_sigma1[blockIdx.z]) && (input_div2[gid] > beta * input_sigma2[blockIdx.z]) ) { if(intersection) { if(persistence_map[gid] >= 0) idx = gid; } else idx = gid; } } persistence_map[gid] = idx; } }; __global__ void computePersistanceDFPFHFeatures(const PersistanceDFPFHEstimator pers){ pers(); } struct PersistanceIntersectionDFPFHEstimator : public DFPFHBaseKernel { enum { dx = 1024, }; float beta; unsigned int n_view; float **input_div; float **input_sigma; int output_persistence_map; __device__ 
__forceinline__ void operator () () const { } }; struct PersistanceMapLength { enum { dx = 1024, }; int *input_persistanceMap; unsigned int *output_persistanceMapLenth; unsigned int length; __device__ __forceinline__ void operator () () const { unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i+1>length) { return; } unsigned int v1 = input_persistanceMap[i]; v1 /= (640*480); if(i+1==length) { output_persistanceMapLenth[v1] = i+1; return; } unsigned int v2 = input_persistanceMap[i+1]; v2 /= (640*480); if(v2>v1) output_persistanceMapLenth[v1] = i+1; } }; __global__ void computePersistanceMapLength(const PersistanceMapLength mapLength){ mapLength(); } } device::SDPFHEstimator sdpfh1; device::SDPFHEstimator sdpfh2; device::DFPFHEstimator dfpfh1; device::DFPFHEstimator dfpfh2; device::MeanDFPFHBlockEstimator meanBlock1; device::MeanDFPFHBlockEstimator meanBlock2; device::MeanDFPFHEstimator<512> mean1; device::MeanDFPFHEstimator<512> mean2; device::DivDFPFHEstimator div1; device::DivDFPFHEstimator div2; device::SigmaDFPFHBlock sigmaBlock1; device::SigmaDFPFHBlock sigmaBlock2; device::SigmaDFPFH<512> sigma1; device::SigmaDFPFH<512> sigma2; device::PersistanceDFPFHEstimator persistance; void DFPFHEstimator::init() { sdpfh1.input_pos = (float4 *)getInputDataPointer(PointCoordinates); sdpfh1.input_normals = (float4 *)getInputDataPointer(PointNormal); sdpfh2.input_pos = (float4 *)getInputDataPointer(PointCoordinates); sdpfh2.input_normals = (float4 *)getInputDataPointer(PointNormal); dfpfh1.input_pos = (float4 *)getInputDataPointer(PointCoordinates); dfpfh2.input_pos = (float4 *)getInputDataPointer(PointCoordinates); // dfpfh1.input_bins_sdfpfh = (float *)getTargetDataPointer(SDPFHistogram1); // dfpfh1.output_bins = (float *)getTargetDataPointer(DFPFHistogram1); } void DFPFHEstimator::execute() { unsigned int copyIdx = 1; unsigned int n_radii = 3; float radii[] = {10.f,15.f,20.f,25.f}; if(n_radii<2) { printf("Error not enough raddi for persistance analysis! 
\n"); exit(0); } thrust::device_vector<float> d_sdpfh1(n_view*640*480*sdpfh1.bins_n_meta); thrust::device_vector<float> d_sdpfh2(n_view*640*480*sdpfh2.bins_n_meta); sdpfh1.radius = radii[0]; sdpfh1.invalidToNormalPointRatio = 0.5; sdpfh1.output_bins = thrust::raw_pointer_cast(d_sdpfh1.data()); sdpfh2.radius = radii[1]; sdpfh2.invalidToNormalPointRatio = 0.5; sdpfh2.output_bins = thrust::raw_pointer_cast(d_sdpfh2.data()); dim3 block(sdpfh1.dx); dim3 grid(640*480/sdpfh1.points_per_block,1,n_view); hipLaunchKernelGGL(( device::computeSDPFH), dim3(grid),dim3(block), 0, 0, sdpfh1); hipLaunchKernelGGL(( device::computeSDPFH), dim3(grid),dim3(block), 0, 0, sdpfh2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_dfpfh1(n_view*640*480*dfpfh1.bins_n_meta); thrust::device_vector<float> d_dfpfh2(n_view*640*480*sdpfh2.bins_n_meta); dfpfh1.radius = radii[0]; dfpfh1.maxReconstructuionLevel = 1; dfpfh1.input_bins_sdfpfh = sdpfh1.output_bins; dfpfh1.output_bins = thrust::raw_pointer_cast(d_dfpfh1.data()); dfpfh2.radius = radii[1]; dfpfh2.maxReconstructuionLevel = 1; dfpfh2.input_bins_sdfpfh = sdpfh2.output_bins; dfpfh2.output_bins = thrust::raw_pointer_cast(d_dfpfh2.data()); hipLaunchKernelGGL(( device::computeDFPFH), dim3(grid),dim3(block), 0, 0, dfpfh1); hipLaunchKernelGGL(( device::computeDFPFH), dim3(grid),dim3(block), 0, 0, dfpfh2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_ptr<float> dfpfh_ptr = thrust::device_pointer_cast( (float *) getTargetDataPointer(DFPFHistogram) ); if(copyIdx==0) thrust::copy(d_dfpfh1.data(),d_dfpfh1.data()+d_dfpfh1.size(),dfpfh_ptr); if(copyIdx==1) thrust::copy(d_dfpfh2.data(),d_dfpfh2.data()+d_dfpfh2.size(),dfpfh_ptr); thrust::device_vector<float> d_mean_block1((n_view*640*480)/meanBlock1.dx * meanBlock1.bins_n_meta); thrust::device_vector<float> d_mean_block2((n_view*640*480)/meanBlock2.dx * meanBlock2.bins_n_meta); meanBlock1.input_bins = 
dfpfh1.output_bins; meanBlock1.output_block_mean = thrust::raw_pointer_cast(d_mean_block1.data()); meanBlock2.input_bins = dfpfh2.output_bins; meanBlock2.output_block_mean = thrust::raw_pointer_cast(d_mean_block2.data()); dim3 meanPatchBlock(meanBlock1.dx); dim3 meanPatchGrid((640*480)/meanBlock1.dx,1,n_view); hipLaunchKernelGGL(( device::computeBlockMeanDFPFH), dim3(meanPatchGrid),dim3(meanPatchBlock), 0, 0, meanBlock1); hipLaunchKernelGGL(( device::computeBlockMeanDFPFH), dim3(meanPatchGrid),dim3(meanPatchBlock), 0, 0, meanBlock2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_mean1(n_view*mean1.bins_n_meta); thrust::device_vector<float> d_mean2(n_view*mean2.bins_n_meta); mean1.input_bins = meanBlock1.output_block_mean; mean1.length = meanPatchGrid.x; mean1.output_block_mean = thrust::raw_pointer_cast(d_mean1.data()); mean2.input_bins = meanBlock2.output_block_mean; mean2.length = meanPatchGrid.x; mean2.output_block_mean = thrust::raw_pointer_cast(d_mean2.data()); dim3 meanGrid(1,1,n_view); hipLaunchKernelGGL(( device::computeMeanDFPFH), dim3(meanGrid),dim3(mean1.dx), 0, 0, mean1); hipLaunchKernelGGL(( device::computeMeanDFPFH), dim3(meanGrid),dim3(mean2.dx), 0, 0, mean2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_div1(n_view*640*480); thrust::device_vector<float> d_div2(n_view*640*480); div1.input_dfpfh_bins = dfpfh1.output_bins; div1.input_mean_bins = mean1.output_block_mean; div1.output_div = thrust::raw_pointer_cast(d_div1.data()); div2.input_dfpfh_bins = dfpfh2.output_bins; div2.input_mean_bins = mean2.output_block_mean; div2.output_div = thrust::raw_pointer_cast(d_div2.data()); dim3 divGrid((640*480)/div1.dx,1,n_view); hipLaunchKernelGGL(( device::computeDivDFPFH), dim3(divGrid),dim3(div1.dx), 0, 0, div1); hipLaunchKernelGGL(( device::computeDivDFPFH), dim3(divGrid),dim3(div2.dx), 0, 0, div2); checkCudaErrors(hipGetLastError()); 
checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_sigma_block1(n_view*(640*480)/sigmaBlock1.dx); thrust::device_vector<float> d_sigma_block2(n_view*(640*480)/sigmaBlock2.dx); sigmaBlock1.input_div = div1.output_div; sigmaBlock1.output_div_block = thrust::raw_pointer_cast(d_sigma_block1.data()); sigmaBlock1.length = 640*480; sigmaBlock2.input_div = div2.output_div; sigmaBlock2.output_div_block = thrust::raw_pointer_cast(d_sigma_block2.data()); sigmaBlock2.length = 640*480; dim3 sigGrid((640*480)/(sigmaBlock1.dx*2),1,n_view); hipLaunchKernelGGL(( device::computeSigmaDFPFHBlock), dim3(sigGrid),dim3(sigmaBlock1.dx), 0, 0, sigmaBlock1); hipLaunchKernelGGL(( device::computeSigmaDFPFHBlock), dim3(sigGrid),dim3(sigmaBlock1.dx), 0, 0, sigmaBlock2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_sigma1(n_view); thrust::device_vector<float> d_sigma2(n_view); sigma1.input_sig_block = sigmaBlock1.output_div_block; sigma1.input_mean_data = mean1.output_block_mean; sigma1.output_sigmas = thrust::raw_pointer_cast(d_sigma1.data()); sigma1.length = sigGrid.x; sigma2.input_sig_block = sigmaBlock2.output_div_block; sigma2.input_mean_data = mean2.output_block_mean; sigma2.output_sigmas = thrust::raw_pointer_cast(d_sigma2.data()); sigma2.length = sigGrid.x; hipLaunchKernelGGL(( device::computeSigmaDFPFH), dim3(dim3(1,1,n_view)),dim3(sigma1.dx), 0, 0, sigma1); hipLaunchKernelGGL(( device::computeSigmaDFPFH), dim3(dim3(1,1,n_view)),dim3(sigma2.dx), 0, 0, sigma2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<int> d_persistance_map(n_view*640*480); persistance.input_div1 = div1.output_div; persistance.input_div2 = div2.output_div; persistance.input_sigma1 = sigma1.output_sigmas; persistance.input_sigma2 = sigma2.output_sigmas; persistance.input_bins_n_meta1 = dfpfh1.output_bins; persistance.input_bins_n_meta2 = dfpfh2.output_bins; 
persistance.persistence_map = thrust::raw_pointer_cast(d_persistance_map.data()); persistance.beta = 1.f; persistance.intersection = true; if(persistance.intersection) hipMemset(persistance.persistence_map,0,n_view*640*480); hipLaunchKernelGGL(( device::computePersistanceDFPFHFeatures), dim3(dim3((640*480)/persistance.dx,1,n_view)),dim3(persistance.dx), 0, 0, persistance); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if(outputlevel>2) printf("persistence: 1 \n"); for(int il=2;il<n_radii;il++) { bool toggle = (il%2==0); if(toggle) { sdpfh1.radius = radii[il]; hipLaunchKernelGGL(( device::computeSDPFH), dim3(grid),dim3(block), 0, 0, sdpfh1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); dfpfh1.radius = radii[il]; hipLaunchKernelGGL(( device::computeDFPFH), dim3(grid),dim3(block), 0, 0, dfpfh1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeBlockMeanDFPFH), dim3(meanPatchGrid),dim3(meanPatchBlock), 0, 0, meanBlock1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeMeanDFPFH), dim3(meanGrid),dim3(mean1.dx), 0, 0, mean1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeDivDFPFH), dim3(divGrid),dim3(div1.dx), 0, 0, div1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeSigmaDFPFHBlock), dim3(sigGrid),dim3(sigmaBlock1.dx), 0, 0, sigmaBlock1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeSigmaDFPFH), dim3(dim3(1,1,n_view)),dim3(sigma1.dx), 0, 0, sigma1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if(copyIdx==il) thrust::copy(d_dfpfh1.data(),d_dfpfh1.data()+d_dfpfh1.size(),dfpfh_ptr); } else { sdpfh2.radius = radii[il]; hipLaunchKernelGGL(( 
device::computeSDPFH), dim3(grid),dim3(block), 0, 0, sdpfh2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); dfpfh2.radius = radii[il]; hipLaunchKernelGGL(( device::computeDFPFH), dim3(grid),dim3(block), 0, 0, dfpfh2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeBlockMeanDFPFH), dim3(meanPatchGrid),dim3(meanPatchBlock), 0, 0, meanBlock2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeMeanDFPFH), dim3(meanGrid),dim3(mean1.dx), 0, 0, mean2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeDivDFPFH), dim3(divGrid),dim3(div1.dx), 0, 0, div2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeSigmaDFPFHBlock), dim3(sigGrid),dim3(sigmaBlock1.dx), 0, 0, sigmaBlock2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( device::computeSigmaDFPFH), dim3(dim3(1,1,n_view)),dim3(sigma1.dx), 0, 0, sigma2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if(copyIdx==il) thrust::copy(d_dfpfh2.data(),d_dfpfh2.data()+d_dfpfh2.size(),dfpfh_ptr); } hipLaunchKernelGGL(( device::computePersistanceDFPFHFeatures), dim3(dim3((640*480)/persistance.dx,1,n_view)),dim3(persistance.dx), 0, 0, persistance); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); if(outputlevel>2) printf("persistence: %d \n",il); } // thrust::host_vector<int> h_persistance_map = d_persistance_map; thrust::device_ptr<int> last = thrust::remove(d_persistance_map.data(),d_persistance_map.data()+d_persistance_map.size(),-1); // thrust::device_vector<int> d_clearedPersiatnceMap(d_persistance_map.data(),last); // thrust::device_ptr idxLength_ptr = thrust::device_pointer_cast((unsigned int *)getTargetDataPointer(PDFPFHIdxLength)); 
thrust::device_vector<unsigned int> d_persistanceMapLength(n_view); // printf("d_clearedPersiatnceMap.size(): %d \n",d_clearedPersiatnceMap.size()); // device::PersistanceMapLength mapLength; // mapLength.input_persistanceMap = thrust::raw_pointer_cast(d_clearedPersiatnceMap.data()); // mapLength.output_persistanceMapLenth = thrust::raw_pointer_cast(d_persistanceMapLength.data()); // mapLength.length = d_clearedPersiatnceMap.size(); // device::computePersistanceMapLength<<< ((d_clearedPersiatnceMap.size()-1)/mapLength.dx+1),mapLength.dx >>>(mapLength); // checkCudaErrors(hipGetLastError()); // checkCudaErrors(hipDeviceSynchronize()); unsigned int persistanceMapLength = (unsigned int) (last - d_persistance_map.data()); device::PersistanceMapLength mapLength; mapLength.input_persistanceMap = thrust::raw_pointer_cast(d_persistance_map.data()); mapLength.output_persistanceMapLenth = thrust::raw_pointer_cast(d_persistanceMapLength.data()); mapLength.length = persistanceMapLength; hipLaunchKernelGGL(( device::computePersistanceMapLength), dim3(((persistanceMapLength-1)/mapLength.dx+1)),dim3(mapLength.dx) , 0, 0, mapLength); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_ptr<int> idxList_ptr = thrust::device_pointer_cast((int *)getTargetDataPointer(PDFPFHIdxList)); thrust::copy(d_persistance_map.data(),d_persistance_map.data()+mapLength.length,idxList_ptr); thrust::device_ptr<unsigned int> idxLength_ptr = thrust::device_pointer_cast((unsigned int *)getTargetDataPointer(PDFPFHIdxLength)); thrust::copy(d_persistanceMapLength.data(),d_persistanceMapLength.data()+n_view,idxLength_ptr); // bool cont = true; if(outputlevel>1) { printf("length: %d -> %d \n",d_persistance_map.size(),persistanceMapLength); } thrust::host_vector<unsigned int> h_persistanceMapLength = d_persistanceMapLength; for(int l=0;l<h_persistanceMapLength.size();l++) { int length = h_persistanceMapLength.data()[l]; if(length==0) Processor::breakTry(); 
if(outputlevel>1) printf("%d -> %d \n",l,length); } if(outputlevel>1) { thrust::host_vector<int> h_clearedPersiatnceMap = d_persistance_map; int *dataLength = h_clearedPersiatnceMap.data(); int lidx = h_persistanceMapLength.data()[0]; printf("%d %d %d \n",dataLength[lidx-1]/(640*480),dataLength[lidx]/(640*480),dataLength[lidx+1]/(640*480)); } char path[50]; bool output = outputlevel>1; if(output) { thrust::host_vector<int> h_persistance_map = d_persistance_map; // // thrust::device_ptr<int> last = thrust::remove(d_persistance_map.data(),d_persistance_map.data()+d_persistance_map.size(),-1); // int length_pMap = last - d_persistance_map.data(); // printf("length pMap: %d \n",length_pMap); thrust::device_vector<int> d_clearedPersiatnceMap(d_persistance_map.data(),last); thrust::host_vector<int> h_clearedPersiatnceMap = d_clearedPersiatnceMap; thrust::device_ptr<float4> dptr = thrust::device_pointer_cast(dfpfh1.input_pos); thrust::device_vector<float4> d_pos(dptr,dptr+n_view*640*480); thrust::host_vector<float4> h_pos = d_pos; float4 *pos = h_pos.data(); int *persistance_map = h_persistance_map.data(); uchar4 *h_map = (uchar4 *)malloc(640*480*sizeof(uchar4)); int *clearedData = h_clearedPersiatnceMap.data(); for(int v=0;v<n_view;v++) { for(int i=0;i<640*480;i++) { unsigned char g = (pos[v*640*480+i].z/10000.f)*255.f; // if(persistance_map[v*640*480+i]>=0) // { // if(persistance_map[v*640*480+i]!=v*640*480+i) // printf("%d != %d \n",persistance_map[v*640*480+i],v*640*480+i); // // g=255; // } h_map[i] = make_uchar4(g,g,g,128); } for(int i=0;i<h_clearedPersiatnceMap.size();i++) { int idx = clearedData[i]; if(idx >= 0) { unsigned int vi = idx/(640*480); if(vi==v) h_map[idx-v*640*480] = make_uchar4(255,0,0,128); } else printf("oOOOO \n"); } sprintf(path,"/home/avo/pcds/persistance_r0_r1%d.ppm",v); sdkSavePPM4ub(path,(unsigned char*)h_map,640,480); } } printf("done dfpfh! 
\n"); } DFPFHEstimator::DFPFHEstimator(unsigned int n_view,unsigned int outputlevel): n_view(n_view), outputlevel(outputlevel) { DeviceDataParams paramsDFPFH; paramsDFPFH.elements = 640*480*n_view; paramsDFPFH.element_size = (sdpfh1.bins_n_meta) * sizeof(float); paramsDFPFH.elementType = FLOAT1; paramsDFPFH.dataType = Histogramm; addTargetData(addDeviceDataRequest(paramsDFPFH),DFPFHistogram); DeviceDataParams paramsPersistenceIdxList; paramsPersistenceIdxList.elements = 640*480*n_view; paramsPersistenceIdxList.element_size = sizeof(int); paramsPersistenceIdxList.elementType = INT1; paramsPersistenceIdxList.dataType = Indice; addTargetData(addDeviceDataRequest(paramsPersistenceIdxList),PDFPFHIdxList); DeviceDataParams paramsPersistenceIdxLength; paramsPersistenceIdxLength.elements = n_view; paramsPersistenceIdxLength.element_size = sizeof(unsigned int); paramsPersistenceIdxLength.elementType = UINT1; paramsPersistenceIdxLength.dataType = MetaData; addTargetData(addDeviceDataRequest(paramsPersistenceIdxLength),PDFPFHIdxLength); } DFPFHEstimator::~DFPFHEstimator() { // TODO Auto-generated destructor stub } void DFPFHEstimator::TestDFPFHE() { thrust::host_vector<float4> h_inp_pos(n_view*640*480); thrust::host_vector<float4> h_inp_normals(n_view*640*480); for(int i=0;i<h_inp_pos.size();i++) { unsigned int v = i/(640*480); unsigned int y = (i-v*(640*480))/640; unsigned int x = i-v*(640*480)-y*640; h_inp_pos[i] = make_float4(x,y,1000+v,0); device::setValid(h_inp_pos[i].w); device::setForeground(h_inp_pos[i].w); // device::unsetReconstructed(h_inp_pos[i].w); device::setReconstructed(h_inp_pos[i].w,0); h_inp_normals[i] = make_float4(1,0,0,0); } thrust::device_vector<float4> d_inp_pos = h_inp_pos; thrust::device_vector<float4> d_inp_normal = h_inp_normals; thrust::device_vector<float> d_dspfph(n_view*640*480*sdpfh1.bins_n_meta); sdpfh1.input_pos = thrust::raw_pointer_cast(d_inp_pos.data()); sdpfh1.input_normals = thrust::raw_pointer_cast(d_inp_normal.data()); 
sdpfh1.output_bins = thrust::raw_pointer_cast(d_dspfph.data()); sdpfh1.view = 0; sdpfh1.radius = 15.0f; sdpfh1.invalidToNormalPointRatio = 0.5; printf("dspdf \n"); dim3 block(sdpfh1.dx); dim3 grid(640*480/sdpfh1.points_per_block,1,1); hipLaunchKernelGGL(( device::computeSDPFH), dim3(grid),dim3(block), 0, 0, sdpfh1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // thrust::host_vector<float> h_out_histo = d_dspfph; // float *data = h_out_histo.data(); // for(int i=0;i<640*480*n_view;i+=10000) // { // if(data[i*33]!=-1) // { // printf("(%d) ",i); // for(int j=0;j<3;j++) // { // for(int h=0;h<11;h++) // { // printf("%f ",data[i*33+j*11+h]); // // } // printf(" || "); // } // printf("\n"); // } // } thrust::device_vector<float> d_dfpfh(n_view*640*480*dfpfh1.bins_n_meta); dfpfh1.input_pos = thrust::raw_pointer_cast(d_inp_pos.data()); dfpfh1.input_bins_sdfpfh = thrust::raw_pointer_cast(d_dspfph.data()); dfpfh1.output_bins = thrust::raw_pointer_cast(d_dfpfh.data()); dfpfh1.radius = 15.f; dfpfh1.maxReconstructuionLevel = 0; printf("dfpdf \n"); hipLaunchKernelGGL(( device::computeDFPFH), dim3(grid),dim3(block), 0, 0, dfpfh1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::host_vector<float4> h_pos_test = d_inp_pos; for(int i=0;i<33;i++) { unsigned int oy = i/640; unsigned int ox = i - oy*640; printf("(%d %d) %f %f %f %f \n",ox,oy,h_pos_test[i].x,h_pos_test[i].y,h_pos_test[i].z,h_pos_test[i].w); } thrust::host_vector<float> h_out_dfpfh = d_dfpfh; printf("dfpfh.size: %d \n",h_out_dfpfh.size()); float *data = h_out_dfpfh.data(); // for(int v=0;v<n_view;v++) // { // for(int i=0;i<640*480;i+=10000) // { // if(data[v*640*480*(sdpfhEstimator1.bins_n_meta)+sdpfhEstimator1.bins*640*480+i]!=0) // { // printf("(%d) ",i); // for(int j=0;j<3;j++) // { // for(int h=0;h<11;h++) // { // // printf("%f ",data[i*33+j*11+h]); // //TODO // printf("%f ",data[v*640*480*(sdpfhEstimator1.bins_n_meta)+(j*11+h)*640*480]); // // } // 
printf(" || "); // } // printf("\n"); // } // } // } for(int v=0;v<1;v++) { for(int i=0;i<5;i+=1) { for(int j=0;j<3;j++) { for(int h=0;h<11;h++) { // printf("%f ",data[i*33+j*11+h]); //TODO printf("%f ",data[v*640*480*(sdpfh1.bins_n_meta)+(j*11+h)*640*480+i]); } printf(" || "); } printf("|| meta: %f \n",data[v*640*480*(sdpfh1.bins_n_meta)+sdpfh1.bins*640*480+i]); } } // thrust::host_vector<float> h_test_dfpfh(n_view*640*480*blockMean1.bins_n_meta); // for(int i=0;i<blockMean1.bins;i++) // for(int p=0;p<640*480;p++) // h_test_dfpfh[i*640*480+p] = i+1; // // for(int p=0;p<640*480;p++) // h_test_dfpfh[blockMean1.bins*640*480+p] = 1; // // thrust::device_vector<float> d_test_dfpfh = h_test_dfpfh; // blockMean1.input_bins = thrust::raw_pointer_cast(d_test_dfpfh.data()); meanBlock1.input_bins = dfpfh1.output_bins; thrust::device_vector<float> d_test_output((640*480)/meanBlock1.dx * meanBlock1.bins_n_meta); meanBlock1.output_block_mean = thrust::raw_pointer_cast(d_test_output.data()); dim3 meanBlock(meanBlock1.dx); dim3 meanGrid(640*480/meanBlock1.dx,1,n_view); hipLaunchKernelGGL(( device::computeBlockMeanDFPFH), dim3(meanGrid),dim3(meanBlock), 0, 0, meanBlock1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::host_vector<float> h_out_mean_dfpfh = d_test_output; printf("dfpfh.size: %d meanGrid: %d \n",h_out_mean_dfpfh.size(),meanGrid.x); float *data3 = h_out_mean_dfpfh.data(); // printf("test: %f \n",data3[0]); for(int v=0;v<1;v++) { for(int i=0;i<1;i++) { for(int j=0;j<3;j++) { for(int h=0;h<11;h++) { // printf("%f ",data[i*33+j*11+h]); //TODO printf("%f ",data3[v*meanGrid.x*meanBlock1.bins_n_meta+(j*11+h)*meanGrid.x+i]); } printf(" || "); } printf("|| meta: %f \n",data3[v*meanGrid.x*(meanBlock1.bins_n_meta)+meanBlock1.bins*meanGrid.x+i]); } } thrust::device_vector<float> d_outputMean(n_view*mean1.bins_n_meta); mean1.output_block_mean = thrust::raw_pointer_cast(d_outputMean.data()); mean1.input_bins = meanBlock1.output_block_mean; 
mean1.length = meanGrid.x; hipLaunchKernelGGL(( device::computeMeanDFPFH), dim3(n_view),dim3(mean1.dx), 0, 0, mean1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::host_vector<float> h_meanHisto = d_outputMean; float *data4 = h_meanHisto.data(); for(int v=0;v<1;v++) { printf("mean: "); for(int i=0;i<mean1.bins_n_meta;i++) { if(i>0 && i%mean1.bins_per_feature==0) printf("|| "); printf("%f ",data4[v*mean1.bins_n_meta+i]); } printf("\n"); } thrust::device_vector<float> d_div(n_view*640*480); div1.input_dfpfh_bins = dfpfh1.output_bins; div1.input_mean_bins = mean1.output_block_mean; div1.output_div = thrust::raw_pointer_cast(d_div.data()); hipLaunchKernelGGL(( device::computeDivDFPFH), dim3(((640*480)/div1.dx)),dim3(div1.dx), 0, 0, div1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_sigma_block((640*480)/sigmaBlock1.dx); sigmaBlock1.input_div = div1.output_div; sigmaBlock1.output_div_block = thrust::raw_pointer_cast(d_sigma_block.data()); sigmaBlock1.length = 640*480; // dim3 sigBlock((640*480)/sigma1.dx); dim3 sigGrid((640*480)/(sigmaBlock1.dx*2),1,n_view); // dim3 sigGrid(128,1,n_view); hipLaunchKernelGGL(( device::computeSigmaDFPFHBlock), dim3(sigGrid),dim3(sigmaBlock1.dx), 0, 0, sigmaBlock1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); thrust::device_vector<float> d_sigma(n_view); sigma1.input_sig_block = sigmaBlock1.output_div_block; sigma1.input_mean_data = mean1.output_block_mean; sigma1.output_sigmas = thrust::raw_pointer_cast(d_sigma.data()); sigma1.length = sigGrid.x; hipLaunchKernelGGL(( device::computeSigmaDFPFH), dim3(n_view),dim3(sigma1.dx), 0, 0, sigma1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // thrust::device_vector<int> persistance_map(n_view*640*480); // persistance.input_bins_n_meta1 }
63dcd80937db97b7d21351eaee7a9f9a4161d58d.cu
/* * FPFHEstimator2.cpp * * Created on: Sep 25, 2012 * Author: avo */ #include "DFPFHEstimator.h" #include <helper_cuda.h> #include <helper_image.h> #include <thrust/remove.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "point_info.hpp" #include "../include/processor.h" #include "../sink/pcd_io.h" #include "utils.hpp" #include "device_utils.hpp" namespace device { struct DFPFHBaseKernel : public FeatureBaseKernel { enum { rx = 5, ry = rx, lx = 2*rx+1, ly = 2*ry+1, }; }; struct SDPFHEstimator : public DFPFHBaseKernel { enum { points_per_block = 32, dx = points_per_block * WARP_SIZE, dy = 1, rx = 10, ry = rx, lx = 2*rx+1, ly = 2*ry+1, }; float4 *input_pos; float4 *input_normals; float *output_bins; unsigned int view; float radius; float invalidToNormalPointRatio; __device__ __forceinline__ void operator () () const { __shared__ float4 shm_pos[points_per_block]; __shared__ float4 shm_normal[points_per_block]; __shared__ float shm_histo[points_per_block*bins]; __shared__ unsigned int shm_off[points_per_block*2]; __shared__ unsigned int shm_buffer[dx*dy]; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int wid = tid / WARP_SIZE; unsigned int wtid = tid - wid * WARP_SIZE; // unsigned int oy = (blockIdx.x * points_per_block + wid)/640; // unsigned int ox = (blockIdx.x * points_per_block + wid) - oy*640; int ox,oy; if(tid<points_per_block) { shm_off[tid] = oy = (blockIdx.x * points_per_block + tid)/640; shm_off[points_per_block+tid] = ox = (blockIdx.x * points_per_block + tid) - oy*640; shm_pos[tid] = input_pos[blockIdx.z*640*480+oy*640+ox]; shm_normal[tid] = input_normals[blockIdx.z*640*480+oy*640+ox]; } for(int i=tid;i<bins*points_per_block;i+=dx) { shm_histo[i] = 0.f; } __syncthreads(); if(shm_pos[wid].z==0 || isBackground(shm_pos[wid].w)) { for(int i=wtid;i<bins;i+=WARP_SIZE) { // 
output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+i] = -1; output_bins[640*480*blockIdx.z*bins_n_meta + i*640*480 + blockIdx.x*points_per_block + wid] = 0; } if(wtid==0) { // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+bins] = 0; output_bins[640*480*blockIdx.z*bins_n_meta + 640*480*bins + blockIdx.x*points_per_block + wid] = 0; } return; } // for(int i=wtid;i<bins;i+=WARP_SIZE) // { // shm_histo[] // } // ox = shm_off[wid]; // oy = shm_off[points_per_block+wid]; __syncthreads(); unsigned int point_count = 0; unsigned int invalid_points = 0; for(int j=wtid;j<lx*ly;j+=WARP_SIZE) { __syncthreads(); oy = j/ly; ox = j - oy*ly; oy = shm_off[wid] - ry + oy; ox = shm_off[points_per_block+wid] - rx + ox; if(oy<0 || oy>=480 || ox <0 || ox>=640 || j==(lx*ly)/2) continue; float4 p = input_pos[blockIdx.z*640*480+oy*640+ox]; float4 n = input_normals[blockIdx.z*640*480+oy*640+ox]; if(!isValid(p.w)) { invalid_points++; continue; } if(lengthf43(minusf43(shm_pos[wid],p))>radius || n.w<0) { continue; } float3 ps,pt,ns,nt; if( dotf43(shm_normal[wid], minusf43(p,shm_pos[wid]) ) <= dotf43(n, minusf43(shm_pos[wid],p) ) ) { ps = fetch(shm_pos[wid]); ns = fetch(shm_normal[wid]); pt = fetch(p); nt = fetch(n); } else { ps = fetch(p); ns = fetch(n); pt = fetch(shm_pos[wid]); nt = fetch(shm_normal[wid]); } // float3 u = ns; float3 d = pt-ps; float3 v = cross(d,ns); // float3 w = cross(ns,v); float vn = norm(v); if(vn==0.f || norm(d)==0.f) { invalid_points++; continue; } point_count++; vn = 1.f/vn; v = v * vn; unsigned int idx = 0; float f = dot(v,nt); idx = bins_per_feature * ((f + 1.0f) * 0.5f); idx = min(idx,bins_per_feature-1) + 0*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); f = dot(ns,d)/norm(d); idx = bins_per_feature * ((f + 1.0f) * 0.5f); idx = min(idx,bins_per_feature-1) + 1*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); // v <- w v = cross(ns,v); f = atan2f(dot(v,nt),dot(ns,nt)); idx = 
bins_per_feature * ((f + M_PI) * (1.0f / (2.0f * M_PI))); idx = min(idx,bins_per_feature-1) + 2*bins_per_feature + wid*bins; atomicAdd(&shm_histo[idx], 1.f); } __syncthreads(); // shm_buffer[wid*WARP_SIZE+wtid] = point_count; volatile unsigned int *sbuf = &shm_buffer[wid*WARP_SIZE]; sbuf[wtid] = point_count; if(wtid < 16) { sbuf[wtid] += sbuf[wtid + 16]; sbuf[wtid] += sbuf[wtid + 8]; sbuf[wtid] += sbuf[wtid + 4]; sbuf[wtid] += sbuf[wtid + 2]; sbuf[wtid] += sbuf[wtid + 1]; } __syncthreads(); point_count = shm_buffer[wid*WARP_SIZE]; sbuf[wtid] = invalid_points; if(wtid < 16) { sbuf[wtid] += sbuf[wtid + 16]; sbuf[wtid] += sbuf[wtid + 8]; sbuf[wtid] += sbuf[wtid + 4]; sbuf[wtid] += sbuf[wtid + 2]; sbuf[wtid] += sbuf[wtid + 1]; } __syncthreads(); invalid_points = shm_buffer[wid*WARP_SIZE]; // if((((float)invalid_points)/((float)point_count)) > 0.5f) // { // printf("(%d/%d) %d / %d = %f \n",wid,wtid,point_count,invalid_points,(((float)invalid_points)/((float)point_count))); // } __syncthreads(); bool vali = ( point_count > 0 && (((float)invalid_points)/((float)point_count)) < invalidToNormalPointRatio); for(int i=wtid;i<bins;i+=WARP_SIZE) { float re = (vali)?shm_histo[wid*bins+i]/point_count:0; // output_bins[(view*640*480+blockIdx.x*points_per_block+wid)*bins+i] = re; // output_bins[640*480*view*bins+i*640*480+blockDim.x*points_per_block + blockIdx.x*points_per_block + wid] = re; output_bins[640*480*blockIdx.z*bins_n_meta + i*640*480 + blockIdx.x*points_per_block + wid] = re; // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+i] = re; } if(wtid==0) { output_bins[640*480*blockIdx.z*bins_n_meta + bins*640*480 + blockIdx.x*points_per_block + wid] = vali; // output_bins[(blockIdx.z*640*480+blockIdx.x*points_per_block+wid)*bins_n_meta+bins] = vali; } } }; __global__ void computeSDPFH(const SDPFHEstimator sdpfhe){ sdpfhe(); } struct DFPFHEstimator : public DFPFHBaseKernel { enum { points_per_block = 32, dx = points_per_block * WARP_SIZE, dy = 1, 
}; float4 *input_pos; // float4 *input_normals; float *input_bins_sdfpfh; float *output_bins; float radius; int maxReconstructuionLevel; template<typename T> __device__ __forceinline__ void sum(volatile T* smem_buffer,int wtid) const { // T reg = smem_buffer[wtid]; if(wtid<16) { smem_buffer[wtid] += smem_buffer[wtid + 16]; smem_buffer[wtid] += smem_buffer[wtid + 8]; smem_buffer[wtid] += smem_buffer[wtid + 4]; smem_buffer[wtid] += smem_buffer[wtid + 2]; smem_buffer[wtid] += smem_buffer[wtid + 1]; } } __device__ __forceinline__ void operator () () const { __shared__ float4 shm_pos[points_per_block]; // __shared__ float4 shm_normal[points_per_block]; __shared__ float shm_histo[points_per_block*bins_n_meta]; __shared__ unsigned int shm_off[points_per_block*2]; __shared__ float shm_hist_buffer[dx*dy]; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int wid = tid / WARP_SIZE; unsigned int wtid = tid - wid * WARP_SIZE; int ox,oy; if(tid<points_per_block) { shm_off[tid] = oy = (blockIdx.x * points_per_block + tid)/640; shm_off[points_per_block+tid] = ox = (blockIdx.x * points_per_block + tid) - oy*640; shm_pos[tid] = input_pos[blockIdx.z*640*480+oy*640+ox]; } for(int i=tid;i<bins_n_meta*points_per_block;i+=dx) { shm_histo[i] = 0.f; } __syncthreads(); // if(tid==1 && blockIdx.x > 4110 && blockIdx.x < 4115 && blockIdx.z==0) // { // printf("%d/%d -> %d/%d -> %f %f %f %f -> %d %d (%d>%d)->%d %d \n",blockIdx.x,tid,shm_off[tid],shm_off[tid+points_per_block],shm_pos[tid].x,shm_pos[tid].y,shm_pos[tid].z,shm_pos[tid].w,shm_pos[wid].z==0, input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + 640*480*bins + blockIdx.x*points_per_block + wid]==0 , getReconstructionLevel(shm_pos[wid].w),maxReconstructuionLevel , getReconstructionLevel(shm_pos[wid].w)>maxReconstructuionLevel, !isForeground(shm_pos[wid].w) ); // // } if(!(shm_pos[wid].z==0 || input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + 640*480*bins + blockIdx.x*points_per_block + wid]==0 || 
getReconstructionLevel(shm_pos[wid].w)>maxReconstructuionLevel || !isForeground(shm_pos[wid].w))) { // unsigned int point_count = 0; // unsigned int invalid_points = 0; unsigned int warpRuns = (lx*ly-1)/WARP_SIZE +1; // if(blockIdx.z==0 && blockIdx.x==0 && tid==0) // printf("warpRuns: %d | lx*ly: %d \n",warpRuns,lx*ly); for(int r=0;r<warpRuns;r++) { unsigned int j = r*WARP_SIZE+wtid; // __syncthreads(); oy = j/ly; ox = j - oy*ly; oy = shm_off[wid] - ry + oy; ox = shm_off[points_per_block+wid] - rx + ox; bool load = false; float weight = 1.f; if(!(oy<0 || oy>=480 || ox <0 || ox>=640 || j==(lx*ly)/2 || j>=lx*ly)) { float4 p = input_pos[blockIdx.z*640*480+oy*640+ox]; // if(!isValid(p.w)) // { // invalid_points++; // continue; // } float dis; if((dis=lengthf43(minusf43(shm_pos[wid],p)))<=radius) { load = true; weight = 1.f/(dis/1000.f); } } //put in warp buffer and reduce for(int b=0;b<bins_n_meta;b++) { // shm_hist_buffer[wid*WARP_SIZE+j] = (load)?input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + b*640*480 + oy*640+ox]:0.f; shm_hist_buffer[wid*WARP_SIZE+wtid] = (load)?weight*input_bins_sdfpfh[640*480*blockIdx.z*bins_n_meta + b*640*480 + oy*640+ox]:0.f; volatile float *smem_buffer = &shm_hist_buffer[wid*WARP_SIZE]; sum(smem_buffer,wtid); if(wtid==0) { // if(blockIdx.z==0 && blockIdx.x==0 && tid==0) // printf("shm_hist[0]: %f | smem_buffer[0]: %f \n",shm_histo[b*points_per_block+wid],smem_buffer[wtid]); shm_histo[b*points_per_block+wid] += smem_buffer[wtid]; } } } } __syncthreads(); for(int i=tid;i<points_per_block*bins_n_meta;i+=dx*dy) { unsigned int oid = i/points_per_block; unsigned int otid = i - oid*points_per_block; // if(oid<bins) output_bins[blockIdx.z*640*480*bins_n_meta+oid*640*480+blockIdx.x*points_per_block+otid] = (shm_histo[points_per_block*bins+otid]>0.f)?shm_histo[i]/shm_histo[points_per_block*bins+otid]:0.f; // else // output_bins[blockIdx.z*640*480*bins_n_meta+oid*640*480+blockIdx.x*points_per_block+otid] = shm_histo[i]; } } }; __global__ void 
computeDFPFH(const DFPFHEstimator dfpfhe){ dfpfhe(); } struct MeanDFPFHBlockEstimator : public DFPFHBaseKernel { enum { dx = 1024, dy = 1, max_shared = 11*1024, // 20kb shared_lines = max_shared/dx, shared_buffer = shared_lines * dx }; float *input_bins; float *output_block_mean; __device__ __forceinline__ void operator () () const { __shared__ float shm_count[dx]; __shared__ float shm_buffer[shared_buffer]; unsigned int tid = threadIdx.x; shm_count[tid] = input_bins[blockIdx.z*640*480*bins_n_meta+bins*640*480+blockIdx.x*dx+tid]; __syncthreads(); reduceBlock<dx>(shm_count); __syncthreads(); if(tid==0) { output_block_mean[blockIdx.z*gridDim.x*bins_n_meta + bins*gridDim.x+blockIdx.x] = shm_count[0];// (shm_count[0]>0)?1:0; } if(shm_count[0]<=0) return; // if(blockIdx.x==0 && tid==0) // { // printf("count %f \n",shm_count[0]); // output_block_mean[0] = shm_count[0]; // } // shm_buffer[threadIdx.x] = 1; // shm_buffer[dx+threadIdx.x] = 2; unsigned int loops = (bins-1)/shared_lines +1; for(int l=0;l<loops;l++) { unsigned int lines = ((l+1)*shared_lines<bins)?shared_lines:bins-l*shared_lines; for(int i=0;i<lines;i++) { shm_buffer[i*dx+tid] = input_bins[blockIdx.z*640*480*bins_n_meta+(l*shared_lines+i)*640*480+blockIdx.x*dx+tid]; } __syncthreads(); reduceBlockNoReg<dx>(shm_buffer,lines); __syncthreads(); if(tid<lines) output_block_mean[blockIdx.z*gridDim.x*bins_n_meta+(l*shared_lines+tid)*gridDim.x+blockIdx.x] = shm_buffer[tid*dx];///shm_count[0]; // __syncthreads(); } // } // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlock(T* shm) const // { // // unsigned int tid = threadIdx.x; // float sum = shm[tid]; // if (blockSize >= 1024) { if(tid < 512) { shm[tid] = sum = sum + shm[tid + 512]; } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { shm[tid] = sum = sum + shm[tid + 256]; } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { shm[tid] = sum = sum + shm[tid + 128]; } __syncthreads(); } // if (blockSize >= 
128) { if(tid < 64) { shm[tid] = sum = sum + shm[tid + 64]; } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { smem[tid] = sum = sum + smem[tid + 32]; } // if (blockSize >= 32) { smem[tid] = sum = sum + smem[tid + 16]; } // if (blockSize >= 16) { smem[tid] = sum = sum + smem[tid + 8]; } // if (blockSize >= 8) { smem[tid] = sum = sum + smem[tid + 4]; } // if (blockSize >= 4) { smem[tid] = sum = sum + smem[tid + 2]; } // if (blockSize >= 2) { smem[tid] = sum = sum + smem[tid + 1]; } // } // } // // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlockNoReg(T* shm) const // { // // unsigned int tid = threadIdx.x; // if (blockSize >= 1024) { if(tid < 512) { shm[tid] += shm[tid + 512]; } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { shm[tid] += shm[tid + 256]; } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { shm[tid] += shm[tid + 128]; } __syncthreads(); } // if (blockSize >= 128) { if(tid < 64) { shm[tid] += shm[tid + 64]; } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } // if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } // if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } // if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } // if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } // if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } // } // } // // template<unsigned int blockSize, typename T> // __device__ __forceinline__ void reduceBlockNoReg(T* shm,int lines) const // { // // unsigned int tid = threadIdx.x; // if (blockSize >= 1024) { if(tid < 512) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 512]; } } __syncthreads(); } // if (blockSize >= 512) { if(tid < 256) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 256]; } } __syncthreads(); } // if (blockSize >= 256) { if(tid < 128) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += 
shm[i*dx + tid + 128]; } } __syncthreads(); } // if (blockSize >= 128) { if(tid < 64) { for(int i=0;i<lines;i++){ shm[i*dx + tid] += shm[i*dx + tid + 64]; } } __syncthreads(); } // // if(tid < 32) // { // volatile T* smem = shm; // if (blockSize >= 64) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 32]; } } // if (blockSize >= 32) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 16]; } } // if (blockSize >= 16) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 8]; } } // if (blockSize >= 8) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 4]; } } // if (blockSize >= 4) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 2]; } } // if (blockSize >= 2) { for(int i=0;i<lines;i++){ smem[i*dx + tid] += smem[i*dx + tid + 1]; } } // } // } }; __global__ void computeBlockMeanDFPFH(const MeanDFPFHBlockEstimator blockMean){ blockMean(); } template<unsigned int blockSize> struct MeanDFPFHEstimator : public DFPFHBaseKernel { float *input_bins; float *output_block_mean; unsigned int length; enum { dx = blockSize, max_shared = 11*1024, // 11kb shared_lines = max_shared/dx, shared_buffer = shared_lines * dx }; __device__ __forceinline__ void operator () () const { __shared__ float shm_count[dx]; __shared__ float shm_buffer[shared_buffer]; unsigned int tid = threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; // if(tid==0) // printf("block: %d \n",blockIdx.z); // // if(blockIdx.z==1 & tid==0) // { // for(int t=0;t<length;t+=50) // { // printf("inKernelCount(%d): %f \n",t,input_bins[blockIdx.z*length*bins_n_meta+bins*length+t]); // } // } float sum = 0.f; unsigned int i = blockIdx.x*dx*2 + tid; while(i<length) { sum += input_bins[blockIdx.z*length*bins_n_meta+bins*length+i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if ( (i + dx) < length) sum += input_bins[blockIdx.z*length*bins_n_meta+bins*length+i+dx]; i += gridSize; } shm_count[tid] = sum; 
__syncthreads(); reduceBlock<dx>(shm_count); __syncthreads(); if(tid==0) { output_block_mean[blockIdx.z*bins_n_meta+bins] = shm_count[0];// (shm_count[0]>0)?1.f:0.f; // printf("count: %f \n",shm_count[0]); } if(shm_count[0]<=0) return; unsigned int loops = (bins-1)/shared_lines +1; // if(threadIdx.x==0 && blockIdx.x==0) // printf("loops: %d \n",loops); for(int l=0;l<loops;l++) { unsigned int lines = ((l+1)*shared_lines<bins)?shared_lines:bins-l*shared_lines; for(int b=0;b<lines;b++) { sum = 0.f; i = blockIdx.x*dx*2 + threadIdx.x; while(i<length) { sum += input_bins[blockIdx.z*length*bins_n_meta+(l*shared_lines+b)*length+i]; if ( (i + dx) < length) sum += input_bins[blockIdx.z*length*bins_n_meta+(l*shared_lines+b)*length+i+dx]; i += gridSize; } shm_buffer[b*dx+tid] = sum; } __syncthreads(); reduceBlockNoReg<dx>(shm_buffer,lines); __syncthreads(); if(tid<lines) output_block_mean[blockIdx.z*bins_n_meta+(l*shared_lines+tid)] = shm_buffer[tid*dx]/shm_count[0]; } } }; __global__ void computeMeanDFPFH(const MeanDFPFHEstimator<1024> meandfpfhe){ meandfpfhe(); } __global__ void computeMeanDFPFH(const MeanDFPFHEstimator<512> meandfpfhe){ meandfpfhe(); } struct DivDFPFHEstimator: public DFPFHBaseKernel { enum { dx = 1024, }; float *input_dfpfh_bins; float *input_mean_bins; float *output_div; // __device__ __forceinline__ float // klDivergence(float *feature, float *mean, unsigned int bins_count, unsigned int offset_feature, unsigned int offset_mean) const // { // // float div = 0.f; // for(int i=0;i<bins_count;i++) // { // float p = feature[i*offset_feature]; // float m = mean[i*offset_mean]; // // if(p/m>0) // div += (p - m) * __logf(p/m); // } // // return div; // } // // __device__ __forceinline__ float // klEuclideanDivergence(float *feature, float *mean, unsigned int feature_count, unsigned int bin_count_per_feature, unsigned int offset_feature, unsigned int offset_mean) const // { // float div = 0.f; // // for(int f=0;f<feature_count;f++) // { // float tmpDiv = 0.f; // 
for(int i=0;i<bin_count_per_feature;i++) // { // float p = feature[(f*bin_count_per_feature+i)*offset_feature]; // float m = mean[(f*bin_count_per_feature+i)*offset_mean]; // // if(p/m>0) // tmpDiv += (p - m) * __logf(p/m); // } // div += (tmpDiv * tmpDiv); // } // // return sqrtf(div); // } __device__ __forceinline__ void operator () () const { __shared__ float shm_mean[bins]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx+tid; // unsigned int off = gridDim.x; if(tid<bins) shm_mean[tid] = input_mean_bins[blockIdx.z*bins_n_meta+tid]; __syncthreads(); // float div = klDivergence(&input_dfpfh_bins[blockIdx.z*640*480*bins_n_meta+i],shm_mean,bins,640*480,1); float div = klEuclideanDivergence(&input_dfpfh_bins[blockIdx.z*640*480*bins_n_meta+i],shm_mean,features,bins_per_feature,640*480,1); output_div[blockIdx.z*640*480+i] = div; } }; __global__ void computeDivDFPFH(const DivDFPFHEstimator de){ de(); } struct SigmaDFPFHBlock : public DFPFHBaseKernel { enum { dx = 1024, }; float *input_div; float *output_div_block; unsigned int length; __device__ __forceinline__ void operator () () const { __shared__ float shm_buffer[dx]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx*2 + threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; float sum = 0.f; // float count = 0.f; while(i<length) { float tmp = input_div[blockIdx.z*640*480+i]; sum += tmp*tmp; if ((i+dx)<length) { tmp = input_div[blockIdx.z*640*480+i+dx]; sum += tmp * tmp; } i += gridSize; } shm_buffer[tid] = sum; __syncthreads(); reduceBlock<dx>(shm_buffer); __syncthreads(); if(tid==0) output_div_block[blockIdx.z*gridDim.x+blockIdx.x] = shm_buffer[0]; // if(tid==0) // printf("sigBlock: %d -> %f \n",blockIdx.x,shm_buffer[0]); } }; __global__ void computeSigmaDFPFHBlock(const SigmaDFPFHBlock sb){ sb(); } template<unsigned int blockSize> struct SigmaDFPFH : public DFPFHBaseKernel { enum { dx = blockSize, }; float *input_sig_block; float *input_mean_data; float *output_sigmas; unsigned int length; 
__device__ __forceinline__ void operator () () const { __shared__ float shm_buffer[dx]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*dx*2 + threadIdx.x; unsigned int gridSize = dx*2*gridDim.x; float sum = 0.f; while(i<length) { sum += input_sig_block[blockIdx.z*length+i]; if ((i+dx)<length) sum += input_sig_block[blockIdx.z*length+i+dx]; i += gridSize; } shm_buffer[tid] = sum; reduceBlock<dx>(shm_buffer); __syncthreads(); if(tid==0) { float points = input_mean_data[blockIdx.z*bins_n_meta+bins]; // printf("(%d) points: %f sigma: %f \n",blockIdx.z,points,sqrtf(shm_buffer[0]/points)); output_sigmas[blockIdx.z] = (points>0)?sqrtf(shm_buffer[0]/points):-1.f; } } }; __global__ void computeSigmaDFPFH(const SigmaDFPFH<512> sig){ sig(); } struct PersistanceDFPFHEstimator : public DFPFHBaseKernel { enum { dx = 1024, }; float beta; bool intersection; float *input_div1; float *input_div2; float *input_sigma1; float *input_sigma2; float *input_bins_n_meta1; float *input_bins_n_meta2; int *persistence_map; __device__ __forceinline__ void operator () () const { unsigned int tid = threadIdx.x; unsigned int vid = blockIdx.y*blockDim.x*blockDim.y+blockIdx.x*blockDim.x+tid; unsigned int gid = blockIdx.z*640*480+vid; int idx = -1; if( input_bins_n_meta1[blockIdx.z*640*480*bins_n_meta+bins*640*480+vid]>0 && input_bins_n_meta2[blockIdx.z*640*480*bins_n_meta+bins*640*480+vid]>0 ) { if( (input_div1[gid] > beta * input_sigma1[blockIdx.z]) && (input_div2[gid] > beta * input_sigma2[blockIdx.z]) ) { if(intersection) { if(persistence_map[gid] >= 0) idx = gid; } else idx = gid; } } persistence_map[gid] = idx; } }; __global__ void computePersistanceDFPFHFeatures(const PersistanceDFPFHEstimator pers){ pers(); } struct PersistanceIntersectionDFPFHEstimator : public DFPFHBaseKernel { enum { dx = 1024, }; float beta; unsigned int n_view; float **input_div; float **input_sigma; int output_persistence_map; __device__ __forceinline__ void operator () () const { } }; struct 
PersistanceMapLength { enum { dx = 1024, }; int *input_persistanceMap; unsigned int *output_persistanceMapLenth; unsigned int length; __device__ __forceinline__ void operator () () const { unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i+1>length) { return; } unsigned int v1 = input_persistanceMap[i]; v1 /= (640*480); if(i+1==length) { output_persistanceMapLenth[v1] = i+1; return; } unsigned int v2 = input_persistanceMap[i+1]; v2 /= (640*480); if(v2>v1) output_persistanceMapLenth[v1] = i+1; } }; __global__ void computePersistanceMapLength(const PersistanceMapLength mapLength){ mapLength(); } } device::SDPFHEstimator sdpfh1; device::SDPFHEstimator sdpfh2; device::DFPFHEstimator dfpfh1; device::DFPFHEstimator dfpfh2; device::MeanDFPFHBlockEstimator meanBlock1; device::MeanDFPFHBlockEstimator meanBlock2; device::MeanDFPFHEstimator<512> mean1; device::MeanDFPFHEstimator<512> mean2; device::DivDFPFHEstimator div1; device::DivDFPFHEstimator div2; device::SigmaDFPFHBlock sigmaBlock1; device::SigmaDFPFHBlock sigmaBlock2; device::SigmaDFPFH<512> sigma1; device::SigmaDFPFH<512> sigma2; device::PersistanceDFPFHEstimator persistance; void DFPFHEstimator::init() { sdpfh1.input_pos = (float4 *)getInputDataPointer(PointCoordinates); sdpfh1.input_normals = (float4 *)getInputDataPointer(PointNormal); sdpfh2.input_pos = (float4 *)getInputDataPointer(PointCoordinates); sdpfh2.input_normals = (float4 *)getInputDataPointer(PointNormal); dfpfh1.input_pos = (float4 *)getInputDataPointer(PointCoordinates); dfpfh2.input_pos = (float4 *)getInputDataPointer(PointCoordinates); // dfpfh1.input_bins_sdfpfh = (float *)getTargetDataPointer(SDPFHistogram1); // dfpfh1.output_bins = (float *)getTargetDataPointer(DFPFHistogram1); } void DFPFHEstimator::execute() { unsigned int copyIdx = 1; unsigned int n_radii = 3; float radii[] = {10.f,15.f,20.f,25.f}; if(n_radii<2) { printf("Error not enough raddi for persistance analysis! 
\n"); exit(0); } thrust::device_vector<float> d_sdpfh1(n_view*640*480*sdpfh1.bins_n_meta); thrust::device_vector<float> d_sdpfh2(n_view*640*480*sdpfh2.bins_n_meta); sdpfh1.radius = radii[0]; sdpfh1.invalidToNormalPointRatio = 0.5; sdpfh1.output_bins = thrust::raw_pointer_cast(d_sdpfh1.data()); sdpfh2.radius = radii[1]; sdpfh2.invalidToNormalPointRatio = 0.5; sdpfh2.output_bins = thrust::raw_pointer_cast(d_sdpfh2.data()); dim3 block(sdpfh1.dx); dim3 grid(640*480/sdpfh1.points_per_block,1,n_view); device::computeSDPFH<<<grid,block>>>(sdpfh1); device::computeSDPFH<<<grid,block>>>(sdpfh2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_dfpfh1(n_view*640*480*dfpfh1.bins_n_meta); thrust::device_vector<float> d_dfpfh2(n_view*640*480*sdpfh2.bins_n_meta); dfpfh1.radius = radii[0]; dfpfh1.maxReconstructuionLevel = 1; dfpfh1.input_bins_sdfpfh = sdpfh1.output_bins; dfpfh1.output_bins = thrust::raw_pointer_cast(d_dfpfh1.data()); dfpfh2.radius = radii[1]; dfpfh2.maxReconstructuionLevel = 1; dfpfh2.input_bins_sdfpfh = sdpfh2.output_bins; dfpfh2.output_bins = thrust::raw_pointer_cast(d_dfpfh2.data()); device::computeDFPFH<<<grid,block>>>(dfpfh1); device::computeDFPFH<<<grid,block>>>(dfpfh2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_ptr<float> dfpfh_ptr = thrust::device_pointer_cast( (float *) getTargetDataPointer(DFPFHistogram) ); if(copyIdx==0) thrust::copy(d_dfpfh1.data(),d_dfpfh1.data()+d_dfpfh1.size(),dfpfh_ptr); if(copyIdx==1) thrust::copy(d_dfpfh2.data(),d_dfpfh2.data()+d_dfpfh2.size(),dfpfh_ptr); thrust::device_vector<float> d_mean_block1((n_view*640*480)/meanBlock1.dx * meanBlock1.bins_n_meta); thrust::device_vector<float> d_mean_block2((n_view*640*480)/meanBlock2.dx * meanBlock2.bins_n_meta); meanBlock1.input_bins = dfpfh1.output_bins; meanBlock1.output_block_mean = thrust::raw_pointer_cast(d_mean_block1.data()); meanBlock2.input_bins = dfpfh2.output_bins; 
meanBlock2.output_block_mean = thrust::raw_pointer_cast(d_mean_block2.data()); dim3 meanPatchBlock(meanBlock1.dx); dim3 meanPatchGrid((640*480)/meanBlock1.dx,1,n_view); device::computeBlockMeanDFPFH<<<meanPatchGrid,meanPatchBlock>>>(meanBlock1); device::computeBlockMeanDFPFH<<<meanPatchGrid,meanPatchBlock>>>(meanBlock2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_mean1(n_view*mean1.bins_n_meta); thrust::device_vector<float> d_mean2(n_view*mean2.bins_n_meta); mean1.input_bins = meanBlock1.output_block_mean; mean1.length = meanPatchGrid.x; mean1.output_block_mean = thrust::raw_pointer_cast(d_mean1.data()); mean2.input_bins = meanBlock2.output_block_mean; mean2.length = meanPatchGrid.x; mean2.output_block_mean = thrust::raw_pointer_cast(d_mean2.data()); dim3 meanGrid(1,1,n_view); device::computeMeanDFPFH<<<meanGrid,mean1.dx>>>(mean1); device::computeMeanDFPFH<<<meanGrid,mean2.dx>>>(mean2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_div1(n_view*640*480); thrust::device_vector<float> d_div2(n_view*640*480); div1.input_dfpfh_bins = dfpfh1.output_bins; div1.input_mean_bins = mean1.output_block_mean; div1.output_div = thrust::raw_pointer_cast(d_div1.data()); div2.input_dfpfh_bins = dfpfh2.output_bins; div2.input_mean_bins = mean2.output_block_mean; div2.output_div = thrust::raw_pointer_cast(d_div2.data()); dim3 divGrid((640*480)/div1.dx,1,n_view); device::computeDivDFPFH<<<divGrid,div1.dx>>>(div1); device::computeDivDFPFH<<<divGrid,div2.dx>>>(div2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_sigma_block1(n_view*(640*480)/sigmaBlock1.dx); thrust::device_vector<float> d_sigma_block2(n_view*(640*480)/sigmaBlock2.dx); sigmaBlock1.input_div = div1.output_div; sigmaBlock1.output_div_block = thrust::raw_pointer_cast(d_sigma_block1.data()); sigmaBlock1.length = 640*480; 
sigmaBlock2.input_div = div2.output_div; sigmaBlock2.output_div_block = thrust::raw_pointer_cast(d_sigma_block2.data()); sigmaBlock2.length = 640*480; dim3 sigGrid((640*480)/(sigmaBlock1.dx*2),1,n_view); device::computeSigmaDFPFHBlock<<<sigGrid,sigmaBlock1.dx>>>(sigmaBlock1); device::computeSigmaDFPFHBlock<<<sigGrid,sigmaBlock1.dx>>>(sigmaBlock2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_sigma1(n_view); thrust::device_vector<float> d_sigma2(n_view); sigma1.input_sig_block = sigmaBlock1.output_div_block; sigma1.input_mean_data = mean1.output_block_mean; sigma1.output_sigmas = thrust::raw_pointer_cast(d_sigma1.data()); sigma1.length = sigGrid.x; sigma2.input_sig_block = sigmaBlock2.output_div_block; sigma2.input_mean_data = mean2.output_block_mean; sigma2.output_sigmas = thrust::raw_pointer_cast(d_sigma2.data()); sigma2.length = sigGrid.x; device::computeSigmaDFPFH<<<dim3(1,1,n_view),sigma1.dx>>>(sigma1); device::computeSigmaDFPFH<<<dim3(1,1,n_view),sigma2.dx>>>(sigma2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<int> d_persistance_map(n_view*640*480); persistance.input_div1 = div1.output_div; persistance.input_div2 = div2.output_div; persistance.input_sigma1 = sigma1.output_sigmas; persistance.input_sigma2 = sigma2.output_sigmas; persistance.input_bins_n_meta1 = dfpfh1.output_bins; persistance.input_bins_n_meta2 = dfpfh2.output_bins; persistance.persistence_map = thrust::raw_pointer_cast(d_persistance_map.data()); persistance.beta = 1.f; persistance.intersection = true; if(persistance.intersection) cudaMemset(persistance.persistence_map,0,n_view*640*480); device::computePersistanceDFPFHFeatures<<<dim3((640*480)/persistance.dx,1,n_view),persistance.dx>>>(persistance); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if(outputlevel>2) printf("persistence: 1 \n"); for(int il=2;il<n_radii;il++) { bool toggle = 
(il%2==0); if(toggle) { sdpfh1.radius = radii[il]; device::computeSDPFH<<<grid,block>>>(sdpfh1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); dfpfh1.radius = radii[il]; device::computeDFPFH<<<grid,block>>>(dfpfh1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeBlockMeanDFPFH<<<meanPatchGrid,meanPatchBlock>>>(meanBlock1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeMeanDFPFH<<<meanGrid,mean1.dx>>>(mean1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeDivDFPFH<<<divGrid,div1.dx>>>(div1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeSigmaDFPFHBlock<<<sigGrid,sigmaBlock1.dx>>>(sigmaBlock1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeSigmaDFPFH<<<dim3(1,1,n_view),sigma1.dx>>>(sigma1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if(copyIdx==il) thrust::copy(d_dfpfh1.data(),d_dfpfh1.data()+d_dfpfh1.size(),dfpfh_ptr); } else { sdpfh2.radius = radii[il]; device::computeSDPFH<<<grid,block>>>(sdpfh2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); dfpfh2.radius = radii[il]; device::computeDFPFH<<<grid,block>>>(dfpfh2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeBlockMeanDFPFH<<<meanPatchGrid,meanPatchBlock>>>(meanBlock2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeMeanDFPFH<<<meanGrid,mean1.dx>>>(mean2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeDivDFPFH<<<divGrid,div1.dx>>>(div2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); device::computeSigmaDFPFHBlock<<<sigGrid,sigmaBlock1.dx>>>(sigmaBlock2); checkCudaErrors(cudaGetLastError()); 
checkCudaErrors(cudaDeviceSynchronize()); device::computeSigmaDFPFH<<<dim3(1,1,n_view),sigma1.dx>>>(sigma2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if(copyIdx==il) thrust::copy(d_dfpfh2.data(),d_dfpfh2.data()+d_dfpfh2.size(),dfpfh_ptr); } device::computePersistanceDFPFHFeatures<<<dim3((640*480)/persistance.dx,1,n_view),persistance.dx>>>(persistance); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); if(outputlevel>2) printf("persistence: %d \n",il); } // thrust::host_vector<int> h_persistance_map = d_persistance_map; thrust::device_ptr<int> last = thrust::remove(d_persistance_map.data(),d_persistance_map.data()+d_persistance_map.size(),-1); // thrust::device_vector<int> d_clearedPersiatnceMap(d_persistance_map.data(),last); // thrust::device_ptr idxLength_ptr = thrust::device_pointer_cast((unsigned int *)getTargetDataPointer(PDFPFHIdxLength)); thrust::device_vector<unsigned int> d_persistanceMapLength(n_view); // printf("d_clearedPersiatnceMap.size(): %d \n",d_clearedPersiatnceMap.size()); // device::PersistanceMapLength mapLength; // mapLength.input_persistanceMap = thrust::raw_pointer_cast(d_clearedPersiatnceMap.data()); // mapLength.output_persistanceMapLenth = thrust::raw_pointer_cast(d_persistanceMapLength.data()); // mapLength.length = d_clearedPersiatnceMap.size(); // device::computePersistanceMapLength<<< ((d_clearedPersiatnceMap.size()-1)/mapLength.dx+1),mapLength.dx >>>(mapLength); // checkCudaErrors(cudaGetLastError()); // checkCudaErrors(cudaDeviceSynchronize()); unsigned int persistanceMapLength = (unsigned int) (last - d_persistance_map.data()); device::PersistanceMapLength mapLength; mapLength.input_persistanceMap = thrust::raw_pointer_cast(d_persistance_map.data()); mapLength.output_persistanceMapLenth = thrust::raw_pointer_cast(d_persistanceMapLength.data()); mapLength.length = persistanceMapLength; device::computePersistanceMapLength<<< 
((persistanceMapLength-1)/mapLength.dx+1),mapLength.dx >>>(mapLength); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_ptr<int> idxList_ptr = thrust::device_pointer_cast((int *)getTargetDataPointer(PDFPFHIdxList)); thrust::copy(d_persistance_map.data(),d_persistance_map.data()+mapLength.length,idxList_ptr); thrust::device_ptr<unsigned int> idxLength_ptr = thrust::device_pointer_cast((unsigned int *)getTargetDataPointer(PDFPFHIdxLength)); thrust::copy(d_persistanceMapLength.data(),d_persistanceMapLength.data()+n_view,idxLength_ptr); // bool cont = true; if(outputlevel>1) { printf("length: %d -> %d \n",d_persistance_map.size(),persistanceMapLength); } thrust::host_vector<unsigned int> h_persistanceMapLength = d_persistanceMapLength; for(int l=0;l<h_persistanceMapLength.size();l++) { int length = h_persistanceMapLength.data()[l]; if(length==0) Processor::breakTry(); if(outputlevel>1) printf("%d -> %d \n",l,length); } if(outputlevel>1) { thrust::host_vector<int> h_clearedPersiatnceMap = d_persistance_map; int *dataLength = h_clearedPersiatnceMap.data(); int lidx = h_persistanceMapLength.data()[0]; printf("%d %d %d \n",dataLength[lidx-1]/(640*480),dataLength[lidx]/(640*480),dataLength[lidx+1]/(640*480)); } char path[50]; bool output = outputlevel>1; if(output) { thrust::host_vector<int> h_persistance_map = d_persistance_map; // // thrust::device_ptr<int> last = thrust::remove(d_persistance_map.data(),d_persistance_map.data()+d_persistance_map.size(),-1); // int length_pMap = last - d_persistance_map.data(); // printf("length pMap: %d \n",length_pMap); thrust::device_vector<int> d_clearedPersiatnceMap(d_persistance_map.data(),last); thrust::host_vector<int> h_clearedPersiatnceMap = d_clearedPersiatnceMap; thrust::device_ptr<float4> dptr = thrust::device_pointer_cast(dfpfh1.input_pos); thrust::device_vector<float4> d_pos(dptr,dptr+n_view*640*480); thrust::host_vector<float4> h_pos = d_pos; float4 *pos = h_pos.data(); int 
*persistance_map = h_persistance_map.data(); uchar4 *h_map = (uchar4 *)malloc(640*480*sizeof(uchar4)); int *clearedData = h_clearedPersiatnceMap.data(); for(int v=0;v<n_view;v++) { for(int i=0;i<640*480;i++) { unsigned char g = (pos[v*640*480+i].z/10000.f)*255.f; // if(persistance_map[v*640*480+i]>=0) // { // if(persistance_map[v*640*480+i]!=v*640*480+i) // printf("%d != %d \n",persistance_map[v*640*480+i],v*640*480+i); // // g=255; // } h_map[i] = make_uchar4(g,g,g,128); } for(int i=0;i<h_clearedPersiatnceMap.size();i++) { int idx = clearedData[i]; if(idx >= 0) { unsigned int vi = idx/(640*480); if(vi==v) h_map[idx-v*640*480] = make_uchar4(255,0,0,128); } else printf("oOOOO \n"); } sprintf(path,"/home/avo/pcds/persistance_r0_r1%d.ppm",v); sdkSavePPM4ub(path,(unsigned char*)h_map,640,480); } } printf("done dfpfh! \n"); } DFPFHEstimator::DFPFHEstimator(unsigned int n_view,unsigned int outputlevel): n_view(n_view), outputlevel(outputlevel) { DeviceDataParams paramsDFPFH; paramsDFPFH.elements = 640*480*n_view; paramsDFPFH.element_size = (sdpfh1.bins_n_meta) * sizeof(float); paramsDFPFH.elementType = FLOAT1; paramsDFPFH.dataType = Histogramm; addTargetData(addDeviceDataRequest(paramsDFPFH),DFPFHistogram); DeviceDataParams paramsPersistenceIdxList; paramsPersistenceIdxList.elements = 640*480*n_view; paramsPersistenceIdxList.element_size = sizeof(int); paramsPersistenceIdxList.elementType = INT1; paramsPersistenceIdxList.dataType = Indice; addTargetData(addDeviceDataRequest(paramsPersistenceIdxList),PDFPFHIdxList); DeviceDataParams paramsPersistenceIdxLength; paramsPersistenceIdxLength.elements = n_view; paramsPersistenceIdxLength.element_size = sizeof(unsigned int); paramsPersistenceIdxLength.elementType = UINT1; paramsPersistenceIdxLength.dataType = MetaData; addTargetData(addDeviceDataRequest(paramsPersistenceIdxLength),PDFPFHIdxLength); } DFPFHEstimator::~DFPFHEstimator() { // TODO Auto-generated destructor stub } void DFPFHEstimator::TestDFPFHE() { 
thrust::host_vector<float4> h_inp_pos(n_view*640*480); thrust::host_vector<float4> h_inp_normals(n_view*640*480); for(int i=0;i<h_inp_pos.size();i++) { unsigned int v = i/(640*480); unsigned int y = (i-v*(640*480))/640; unsigned int x = i-v*(640*480)-y*640; h_inp_pos[i] = make_float4(x,y,1000+v,0); device::setValid(h_inp_pos[i].w); device::setForeground(h_inp_pos[i].w); // device::unsetReconstructed(h_inp_pos[i].w); device::setReconstructed(h_inp_pos[i].w,0); h_inp_normals[i] = make_float4(1,0,0,0); } thrust::device_vector<float4> d_inp_pos = h_inp_pos; thrust::device_vector<float4> d_inp_normal = h_inp_normals; thrust::device_vector<float> d_dspfph(n_view*640*480*sdpfh1.bins_n_meta); sdpfh1.input_pos = thrust::raw_pointer_cast(d_inp_pos.data()); sdpfh1.input_normals = thrust::raw_pointer_cast(d_inp_normal.data()); sdpfh1.output_bins = thrust::raw_pointer_cast(d_dspfph.data()); sdpfh1.view = 0; sdpfh1.radius = 15.0f; sdpfh1.invalidToNormalPointRatio = 0.5; printf("dspdf \n"); dim3 block(sdpfh1.dx); dim3 grid(640*480/sdpfh1.points_per_block,1,1); device::computeSDPFH<<<grid,block>>>(sdpfh1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // thrust::host_vector<float> h_out_histo = d_dspfph; // float *data = h_out_histo.data(); // for(int i=0;i<640*480*n_view;i+=10000) // { // if(data[i*33]!=-1) // { // printf("(%d) ",i); // for(int j=0;j<3;j++) // { // for(int h=0;h<11;h++) // { // printf("%f ",data[i*33+j*11+h]); // // } // printf(" || "); // } // printf("\n"); // } // } thrust::device_vector<float> d_dfpfh(n_view*640*480*dfpfh1.bins_n_meta); dfpfh1.input_pos = thrust::raw_pointer_cast(d_inp_pos.data()); dfpfh1.input_bins_sdfpfh = thrust::raw_pointer_cast(d_dspfph.data()); dfpfh1.output_bins = thrust::raw_pointer_cast(d_dfpfh.data()); dfpfh1.radius = 15.f; dfpfh1.maxReconstructuionLevel = 0; printf("dfpdf \n"); device::computeDFPFH<<<grid,block>>>(dfpfh1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); 
thrust::host_vector<float4> h_pos_test = d_inp_pos; for(int i=0;i<33;i++) { unsigned int oy = i/640; unsigned int ox = i - oy*640; printf("(%d %d) %f %f %f %f \n",ox,oy,h_pos_test[i].x,h_pos_test[i].y,h_pos_test[i].z,h_pos_test[i].w); } thrust::host_vector<float> h_out_dfpfh = d_dfpfh; printf("dfpfh.size: %d \n",h_out_dfpfh.size()); float *data = h_out_dfpfh.data(); // for(int v=0;v<n_view;v++) // { // for(int i=0;i<640*480;i+=10000) // { // if(data[v*640*480*(sdpfhEstimator1.bins_n_meta)+sdpfhEstimator1.bins*640*480+i]!=0) // { // printf("(%d) ",i); // for(int j=0;j<3;j++) // { // for(int h=0;h<11;h++) // { // // printf("%f ",data[i*33+j*11+h]); // //TODO // printf("%f ",data[v*640*480*(sdpfhEstimator1.bins_n_meta)+(j*11+h)*640*480]); // // } // printf(" || "); // } // printf("\n"); // } // } // } for(int v=0;v<1;v++) { for(int i=0;i<5;i+=1) { for(int j=0;j<3;j++) { for(int h=0;h<11;h++) { // printf("%f ",data[i*33+j*11+h]); //TODO printf("%f ",data[v*640*480*(sdpfh1.bins_n_meta)+(j*11+h)*640*480+i]); } printf(" || "); } printf("|| meta: %f \n",data[v*640*480*(sdpfh1.bins_n_meta)+sdpfh1.bins*640*480+i]); } } // thrust::host_vector<float> h_test_dfpfh(n_view*640*480*blockMean1.bins_n_meta); // for(int i=0;i<blockMean1.bins;i++) // for(int p=0;p<640*480;p++) // h_test_dfpfh[i*640*480+p] = i+1; // // for(int p=0;p<640*480;p++) // h_test_dfpfh[blockMean1.bins*640*480+p] = 1; // // thrust::device_vector<float> d_test_dfpfh = h_test_dfpfh; // blockMean1.input_bins = thrust::raw_pointer_cast(d_test_dfpfh.data()); meanBlock1.input_bins = dfpfh1.output_bins; thrust::device_vector<float> d_test_output((640*480)/meanBlock1.dx * meanBlock1.bins_n_meta); meanBlock1.output_block_mean = thrust::raw_pointer_cast(d_test_output.data()); dim3 meanBlock(meanBlock1.dx); dim3 meanGrid(640*480/meanBlock1.dx,1,n_view); device::computeBlockMeanDFPFH<<<meanGrid,meanBlock>>>(meanBlock1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); 
thrust::host_vector<float> h_out_mean_dfpfh = d_test_output; printf("dfpfh.size: %d meanGrid: %d \n",h_out_mean_dfpfh.size(),meanGrid.x); float *data3 = h_out_mean_dfpfh.data(); // printf("test: %f \n",data3[0]); for(int v=0;v<1;v++) { for(int i=0;i<1;i++) { for(int j=0;j<3;j++) { for(int h=0;h<11;h++) { // printf("%f ",data[i*33+j*11+h]); //TODO printf("%f ",data3[v*meanGrid.x*meanBlock1.bins_n_meta+(j*11+h)*meanGrid.x+i]); } printf(" || "); } printf("|| meta: %f \n",data3[v*meanGrid.x*(meanBlock1.bins_n_meta)+meanBlock1.bins*meanGrid.x+i]); } } thrust::device_vector<float> d_outputMean(n_view*mean1.bins_n_meta); mean1.output_block_mean = thrust::raw_pointer_cast(d_outputMean.data()); mean1.input_bins = meanBlock1.output_block_mean; mean1.length = meanGrid.x; device::computeMeanDFPFH<<<n_view,mean1.dx>>>(mean1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::host_vector<float> h_meanHisto = d_outputMean; float *data4 = h_meanHisto.data(); for(int v=0;v<1;v++) { printf("mean: "); for(int i=0;i<mean1.bins_n_meta;i++) { if(i>0 && i%mean1.bins_per_feature==0) printf("|| "); printf("%f ",data4[v*mean1.bins_n_meta+i]); } printf("\n"); } thrust::device_vector<float> d_div(n_view*640*480); div1.input_dfpfh_bins = dfpfh1.output_bins; div1.input_mean_bins = mean1.output_block_mean; div1.output_div = thrust::raw_pointer_cast(d_div.data()); device::computeDivDFPFH<<<((640*480)/div1.dx),div1.dx>>>(div1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_sigma_block((640*480)/sigmaBlock1.dx); sigmaBlock1.input_div = div1.output_div; sigmaBlock1.output_div_block = thrust::raw_pointer_cast(d_sigma_block.data()); sigmaBlock1.length = 640*480; // dim3 sigBlock((640*480)/sigma1.dx); dim3 sigGrid((640*480)/(sigmaBlock1.dx*2),1,n_view); // dim3 sigGrid(128,1,n_view); device::computeSigmaDFPFHBlock<<<sigGrid,sigmaBlock1.dx>>>(sigmaBlock1); checkCudaErrors(cudaGetLastError()); 
checkCudaErrors(cudaDeviceSynchronize()); thrust::device_vector<float> d_sigma(n_view); sigma1.input_sig_block = sigmaBlock1.output_div_block; sigma1.input_mean_data = mean1.output_block_mean; sigma1.output_sigmas = thrust::raw_pointer_cast(d_sigma.data()); sigma1.length = sigGrid.x; device::computeSigmaDFPFH<<<n_view,sigma1.dx>>>(sigma1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // thrust::device_vector<int> persistance_map(n_view*640*480); // persistance.input_bins_n_meta1 }
f9fb95506e22cccee9a7d702aaf5da02aeb04a0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel1_x_nonvector [5][2]; static int dims_advec_mom_kernel1_x_nonvector_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel1_x_nonvector_gpu(const ACC<double> &node_flux, const ACC<double> &node_mass_pre, ACC<double> &mom_flux, const ACC<double> &celldx, const ACC<double> &vel1) { double sigma, wind, width; double vdiffuw, vdiffdw, auw, adw, limiter; int upwind, donor, downwind, dif; double advec_vel_temp; if( (node_flux(0,0,0)) < 0.0) { upwind = 2; donor = 1; downwind = 0; dif = donor; } else { upwind = -1; donor = 0; downwind = 1; dif = upwind; } sigma = fabs(node_flux(0,0,0))/node_mass_pre(donor,0,0); width = celldx(0,0,0); vdiffuw = vel1(donor,0,0) - vel1(upwind,0,0); vdiffdw = vel1(downwind,0,0) - vel1(donor,0,0); limiter=0.0; if(vdiffuw*vdiffdw > 0.0) { auw = fabs(vdiffuw); adw = fabs(vdiffdw); wind = 1.0; if(vdiffdw <= 0.0) wind = -1.0; limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldx(dif,0,0))/6.0, MIN(auw, adw)); } advec_vel_temp = vel1(donor,0,0) + (1.0 - sigma) * limiter; mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0); } __global__ void ops_advec_mom_kernel1_x_nonvector( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] * dims_advec_mom_kernel1_x_nonvector[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] * dims_advec_mom_kernel1_x_nonvector[1][1]; arg2 += idx_x * 1*1 + idx_y * 
1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] * dims_advec_mom_kernel1_x_nonvector[2][1]; arg3 += idx_x * 1*1 + idx_y * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] + idx_z * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] * dims_advec_mom_kernel1_x_nonvector[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] * dims_advec_mom_kernel1_x_nonvector[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { const ACC<double> argp0(dims_advec_mom_kernel1_x_nonvector[0][0], dims_advec_mom_kernel1_x_nonvector[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel1_x_nonvector[1][0], dims_advec_mom_kernel1_x_nonvector[1][1], arg1); ACC<double> argp2(dims_advec_mom_kernel1_x_nonvector[2][0], dims_advec_mom_kernel1_x_nonvector[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel1_x_nonvector[3][0], dims_advec_mom_kernel1_x_nonvector[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel1_x_nonvector[4][0], dims_advec_mom_kernel1_x_nonvector[4][1], arg4); advec_mom_kernel1_x_nonvector_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel1_x_nonvector_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,128)) return; #endif if (OPS_diags > 1) { 
ops_timing_realloc(128,"advec_mom_kernel1_x_nonvector"); OPS_kernels[128].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel1_x_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_x_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_x_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_x_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_x_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_x_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_x_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_x_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_x_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_x_nonvector_h[4][1]) { dims_advec_mom_kernel1_x_nonvector_h[0][0] = xdim0; dims_advec_mom_kernel1_x_nonvector_h[0][1] = ydim0; dims_advec_mom_kernel1_x_nonvector_h[1][0] = xdim1; dims_advec_mom_kernel1_x_nonvector_h[1][1] = ydim1; dims_advec_mom_kernel1_x_nonvector_h[2][0] = xdim2; dims_advec_mom_kernel1_x_nonvector_h[2][1] = ydim2; dims_advec_mom_kernel1_x_nonvector_h[3][0] = xdim3; dims_advec_mom_kernel1_x_nonvector_h[3][1] = ydim3; dims_advec_mom_kernel1_x_nonvector_h[4][0] = xdim4; dims_advec_mom_kernel1_x_nonvector_h[4][1] = ydim4; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel1_x_nonvector, 
dims_advec_mom_kernel1_x_nonvector_h, sizeof(dims_advec_mom_kernel1_x_nonvector))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * 
(start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[128].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel1_x_nonvector), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[128].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[128].mpi_time += t2-t1; OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; 
desc->index = 128; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 128; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel1_x_nonvector_execute; if (OPS_diags > 1) { ops_timing_realloc(128,"advec_mom_kernel1_x_nonvector"); } ops_enqueue_kernel(desc); } #endif
f9fb95506e22cccee9a7d702aaf5da02aeb04a0e.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel1_x_nonvector [5][2]; static int dims_advec_mom_kernel1_x_nonvector_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel1_x_nonvector_gpu(const ACC<double> &node_flux, const ACC<double> &node_mass_pre, ACC<double> &mom_flux, const ACC<double> &celldx, const ACC<double> &vel1) { double sigma, wind, width; double vdiffuw, vdiffdw, auw, adw, limiter; int upwind, donor, downwind, dif; double advec_vel_temp; if( (node_flux(0,0,0)) < 0.0) { upwind = 2; donor = 1; downwind = 0; dif = donor; } else { upwind = -1; donor = 0; downwind = 1; dif = upwind; } sigma = fabs(node_flux(0,0,0))/node_mass_pre(donor,0,0); width = celldx(0,0,0); vdiffuw = vel1(donor,0,0) - vel1(upwind,0,0); vdiffdw = vel1(downwind,0,0) - vel1(donor,0,0); limiter=0.0; if(vdiffuw*vdiffdw > 0.0) { auw = fabs(vdiffuw); adw = fabs(vdiffdw); wind = 1.0; if(vdiffdw <= 0.0) wind = -1.0; limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldx(dif,0,0))/6.0, MIN(auw, adw)); } advec_vel_temp = vel1(donor,0,0) + (1.0 - sigma) * limiter; mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0); } __global__ void ops_advec_mom_kernel1_x_nonvector( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] * dims_advec_mom_kernel1_x_nonvector[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] * dims_advec_mom_kernel1_x_nonvector[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] + idx_z * 1*1 * 
dims_advec_mom_kernel1_x_nonvector[2][0] * dims_advec_mom_kernel1_x_nonvector[2][1]; arg3 += idx_x * 1*1 + idx_y * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] + idx_z * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] * dims_advec_mom_kernel1_x_nonvector[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] * dims_advec_mom_kernel1_x_nonvector[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { const ACC<double> argp0(dims_advec_mom_kernel1_x_nonvector[0][0], dims_advec_mom_kernel1_x_nonvector[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel1_x_nonvector[1][0], dims_advec_mom_kernel1_x_nonvector[1][1], arg1); ACC<double> argp2(dims_advec_mom_kernel1_x_nonvector[2][0], dims_advec_mom_kernel1_x_nonvector[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel1_x_nonvector[3][0], dims_advec_mom_kernel1_x_nonvector[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel1_x_nonvector[4][0], dims_advec_mom_kernel1_x_nonvector[4][1], arg4); advec_mom_kernel1_x_nonvector_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel1_x_nonvector_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,128)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(128,"advec_mom_kernel1_x_nonvector"); OPS_kernels[128].count++; 
ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel1_x_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_x_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_x_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_x_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_x_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_x_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_x_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_x_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_x_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_x_nonvector_h[4][1]) { dims_advec_mom_kernel1_x_nonvector_h[0][0] = xdim0; dims_advec_mom_kernel1_x_nonvector_h[0][1] = ydim0; dims_advec_mom_kernel1_x_nonvector_h[1][0] = xdim1; dims_advec_mom_kernel1_x_nonvector_h[1][1] = ydim1; dims_advec_mom_kernel1_x_nonvector_h[2][0] = xdim2; dims_advec_mom_kernel1_x_nonvector_h[2][1] = ydim2; dims_advec_mom_kernel1_x_nonvector_h[3][0] = xdim3; dims_advec_mom_kernel1_x_nonvector_h[3][1] = ydim3; dims_advec_mom_kernel1_x_nonvector_h[4][0] = xdim4; dims_advec_mom_kernel1_x_nonvector_h[4][1] = ydim4; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel1_x_nonvector, dims_advec_mom_kernel1_x_nonvector_h, sizeof(dims_advec_mom_kernel1_x_nonvector))); } 
int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int 
base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[128].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel1_x_nonvector<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[128].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[128].mpi_time += t2-t1; OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 128; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 128; for ( int i=0; i<6; i++ ){ 
desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel1_x_nonvector_execute; if (OPS_diags > 1) { ops_timing_realloc(128,"advec_mom_kernel1_x_nonvector"); } ops_enqueue_kernel(desc); } #endif
972ff1ac17e25b824ef64230f5d37f4fa2f08a40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ctime> #include <stdio.h> __global__ void print_3d(int *vector) { int threads_per_block = blockDim.x * blockDim.y * blockDim.z; int index = threadIdx.x + (threadIdx.y * (blockDim.z * blockDim.x)) + (threadIdx.z * blockDim.z) + (blockIdx.x * threads_per_block) + (blockIdx.z * gridDim.x * threads_per_block) + (blockIdx.y * gridDim.z * gridDim.x * threads_per_block); printf("index: %d value: %d\n", index, vector[index]); } // ================================================== int main() { printf(" starts ... \n"); int size = 64; int byte_size = size * sizeof(int); int *h_input; h_input = (int *)malloc(byte_size); for (int i = 0; i < size; i++) { h_input[i] = rand() % 1000; } int *d_input; hipMalloc((void **)&d_input, byte_size); hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice); int nx, ny, nz; nx = 4; ny = 4; nz = 4; dim3 block(2, 2, 2); dim3 grid(nx / block.x, ny / block.y, nz / block.z); hipLaunchKernelGGL(( print_3d), dim3(grid), dim3(block), 0, 0, d_input); hipDeviceSynchronize(); hipDeviceReset(); printf(" finished. \n"); return 0; }
972ff1ac17e25b824ef64230f5d37f4fa2f08a40.cu
#include <ctime> #include <stdio.h> __global__ void print_3d(int *vector) { int threads_per_block = blockDim.x * blockDim.y * blockDim.z; int index = threadIdx.x + (threadIdx.y * (blockDim.z * blockDim.x)) + (threadIdx.z * blockDim.z) + (blockIdx.x * threads_per_block) + (blockIdx.z * gridDim.x * threads_per_block) + (blockIdx.y * gridDim.z * gridDim.x * threads_per_block); printf("index: %d value: %d\n", index, vector[index]); } // ================================================== int main() { printf(" starts ... \n"); int size = 64; int byte_size = size * sizeof(int); int *h_input; h_input = (int *)malloc(byte_size); for (int i = 0; i < size; i++) { h_input[i] = rand() % 1000; } int *d_input; cudaMalloc((void **)&d_input, byte_size); cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice); int nx, ny, nz; nx = 4; ny = 4; nz = 4; dim3 block(2, 2, 2); dim3 grid(nx / block.x, ny / block.y, nz / block.z); print_3d<<<grid, block>>>(d_input); cudaDeviceSynchronize(); cudaDeviceReset(); printf(" finished. \n"); return 0; }
e78cf9c555c2f1cc46d826daa598d50da0858195.hip
// !!! This is a file automatically generated by hipify!!! // nvcc -O3 -std=c++11 -arch compute_61 -ccbin=g++ random_access_two_phase.cu #include "hip/hip_runtime.h" #include <algorithm> #include <cassert> #include <iterator> #include <random> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/system/hip/execution_policy.h> #define PXL_HOST_LOOPS 64 template <int PASSES> __global__ void gather_kernel(const unsigned int *const __restrict__ position, const unsigned int *const __restrict__ in1, unsigned int *out1, unsigned int *tmp, const size_t n) { #pragma unroll for (size_t j = 0; j < PASSES; ++j) { const size_t start = j * (n / PASSES); const size_t end = (j + 1) * (n / PASSES); for (size_t i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { if (i >= start && i < end) { out1[i] = in1[position[i]]; } } } } int main(int argc, char **argv) { const size_t size_MB = max(atoi(argv[1]), 1) * sizeof(unsigned int); const size_t size = size_MB * 1024 * 1024; thrust::host_vector<unsigned int> index(size); thrust::sequence(index.begin(), index.end()); std::random_device rd; std::mt19937 g(rd()); std::shuffle(index.begin(), index.end(), g); thrust::device_vector<unsigned int> index_d = index; thrust::device_vector<unsigned int> data_d = index; thrust::device_vector<unsigned int> out_d = index; thrust::device_vector<unsigned int> tmp_d(size, 0); int minGridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, gather_kernel<5>, 0, 0); int gridSize = (size + blockSize - 1) / blockSize; // warm-up hipLaunchKernelGGL(( gather_kernel<100>), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(index_d.data()), thrust::raw_pointer_cast(data_d.data()), thrust::raw_pointer_cast(out_d.data()), thrust::raw_pointer_cast(tmp_d.data()), size); hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); for 
(int ii = 0; ii < PXL_HOST_LOOPS; ii++) { hipLaunchKernelGGL(( gather_kernel<100>), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(index_d.data()), thrust::raw_pointer_cast(data_d.data()), thrust::raw_pointer_cast(out_d.data()), thrust::raw_pointer_cast(tmp_d.data()), size); } hipEventRecord(end); hipEventSynchronize(end); float elapsed; hipEventElapsedTime(&elapsed, start, end); printf("grid %d block %d \n", gridSize, blockSize); printf("%8lu, %f, %8.2f\n", size_MB, elapsed / PXL_HOST_LOOPS, (1.0 * size_MB * PXL_HOST_LOOPS * 1000) / (elapsed * 1024)); return 0; }
e78cf9c555c2f1cc46d826daa598d50da0858195.cu
// nvcc -O3 -std=c++11 -arch compute_61 -ccbin=g++ random_access_two_phase.cu #include "cuda_runtime.h" #include <algorithm> #include <cassert> #include <iterator> #include <random> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/system/cuda/execution_policy.h> #define PXL_HOST_LOOPS 64 template <int PASSES> __global__ void gather_kernel(const unsigned int *const __restrict__ position, const unsigned int *const __restrict__ in1, unsigned int *out1, unsigned int *tmp, const size_t n) { #pragma unroll for (size_t j = 0; j < PASSES; ++j) { const size_t start = j * (n / PASSES); const size_t end = (j + 1) * (n / PASSES); for (size_t i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { if (i >= start && i < end) { out1[i] = in1[position[i]]; } } } } int main(int argc, char **argv) { const size_t size_MB = max(atoi(argv[1]), 1) * sizeof(unsigned int); const size_t size = size_MB * 1024 * 1024; thrust::host_vector<unsigned int> index(size); thrust::sequence(index.begin(), index.end()); std::random_device rd; std::mt19937 g(rd()); std::shuffle(index.begin(), index.end(), g); thrust::device_vector<unsigned int> index_d = index; thrust::device_vector<unsigned int> data_d = index; thrust::device_vector<unsigned int> out_d = index; thrust::device_vector<unsigned int> tmp_d(size, 0); int minGridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, gather_kernel<5>, 0, 0); int gridSize = (size + blockSize - 1) / blockSize; // warm-up gather_kernel<100><<<gridSize, blockSize>>>( thrust::raw_pointer_cast(index_d.data()), thrust::raw_pointer_cast(data_d.data()), thrust::raw_pointer_cast(out_d.data()), thrust::raw_pointer_cast(tmp_d.data()), size); cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); for (int ii = 0; ii < PXL_HOST_LOOPS; ii++) { gather_kernel<100><<<gridSize, blockSize>>>( 
thrust::raw_pointer_cast(index_d.data()), thrust::raw_pointer_cast(data_d.data()), thrust::raw_pointer_cast(out_d.data()), thrust::raw_pointer_cast(tmp_d.data()), size); } cudaEventRecord(end); cudaEventSynchronize(end); float elapsed; cudaEventElapsedTime(&elapsed, start, end); printf("grid %d block %d \n", gridSize, blockSize); printf("%8lu, %f, %8.2f\n", size_MB, elapsed / PXL_HOST_LOOPS, (1.0 * size_MB * PXL_HOST_LOOPS * 1000) / (elapsed * 1024)); return 0; }
d1719fd5b80dcaec7ac8b7d8fcc6149d36182a53.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <vector> #include <cfloat> #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "hip/hip_runtime.h" #include "caffe/layers/yolo_co_layer.hpp" #include "caffe/region_common.hpp" namespace caffe { template <typename Dtype> __global__ void yolo_co_kernel( int outer_num, int inner_num, int co_classes, int channels, int classes, int max_gt, const int* comap_class_data, const int* comap_offset_data, const int* comap_size_data, const int* comap_data, const float* comap_thresh_data, const float* comap_obj_thresh_data, const float* comap_ixr_data, const Dtype* pred_data, const Dtype* bbs_data, const Dtype* truth_data, const Dtype* obj_data, Dtype* target_no_obj_data) { CUDA_KERNEL_LOOP(index, max_gt * outer_num * co_classes * inner_num) { const int s = index % inner_num; auto t = index / inner_num; const int cidx = t % co_classes; t /= co_classes; const int n = t % outer_num; t /= outer_num; auto obj_index = n * inner_num + s; // If this is a ground-truth already, nothing to do if (target_no_obj_data[obj_index] > 0) continue; auto offset_nt = n * 5 * max_gt + t * 5; Dtype tx = *(truth_data + offset_nt + 0); // If no ground-truth at this index if (!tx) continue; Dtype ty = *(truth_data + offset_nt + 1); Dtype tw = *(truth_data + offset_nt + 2); Dtype th = *(truth_data + offset_nt + 3); int cls = *(truth_data + offset_nt + 4); // Ground-truth class // we explicitly ignore this zero-length bounding boxes if (tw <= 0.00001 || th <= 0.00001) continue; int bbs_index = obj_index * 4; Dtype px = *(bbs_data + bbs_index + 0); Dtype py = *(bbs_data + bbs_index + 1); Dtype pw = *(bbs_data + bbs_index + 2); Dtype ph = *(bbs_data + bbs_index + 3); // Same as ground-truth logic: // we explicitly ignore this zero-length bounding boxes if (pw <= 0.00001 || ph <= 0.00001) continue; auto size = comap_size_data[cidx]; auto offset = comap_offset_data[cidx]; for (int i = 0; i < size; ++i) { 
auto co = comap_data[offset + i]; // class that c may co-occur with if (co != cls) continue; // c may co-occure with co only in one rule, so after this the loop will end auto obj_thresh = comap_obj_thresh_data[offset + i]; auto offset_pred = n * channels * inner_num + s; bool with_objectness = (channels == classes + 1); Dtype objectness; if (with_objectness) objectness = pred_data[offset_pred + classes * inner_num]; else objectness = obj_data[obj_index]; if (objectness < obj_thresh) break; auto c = comap_class_data[cidx]; auto conf = pred_data[offset_pred + c * inner_num]; auto thresh = comap_thresh_data[offset + i]; if (conf < thresh) break; // Check intersection with co-occured class auto ixr_thresh = comap_ixr_data[offset + i]; auto ix = TBoxIntersection(px, py, pw, ph, tx, ty, tw, th); ix /= (pw * ph); // intersection ratio if (ix >= ixr_thresh) target_no_obj_data[obj_index] = obj_data[obj_index]; break; } } } template <typename Dtype> void YoloCoOccurrenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int blob_idx = 0; auto blob_obj = bottom[blob_idx++]; auto blob_no_obj = bottom[blob_idx++]; auto blob_truth = bottom[blob_idx++]; auto blob_bbs = bottom[blob_idx++]; auto blob_pred = bottom[blob_idx++]; auto target_no_obj = top[0]; caffe_copy(blob_no_obj->count(), blob_no_obj->gpu_data(), target_no_obj->mutable_gpu_data()); auto co_classes = comap_class_.count(); if (!co_classes) return; yolo_co_kernel << <CAFFE_GET_BLOCKS(max_gt_ * outer_num_ * co_classes * inner_num_), CAFFE_CUDA_NUM_THREADS >> > (outer_num_, inner_num_, co_classes, channels_, classes_, max_gt_, comap_class_.gpu_data(), comap_offset_.gpu_data(), comap_size_.gpu_data(), comap_.gpu_data(), comap_thresh_.gpu_data(), comap_obj_thresh_.gpu_data(), comap_ixr_.gpu_data(), blob_pred->gpu_data(), blob_bbs->gpu_data(), blob_truth->gpu_data(), blob_obj->gpu_data(), target_no_obj->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } 
INSTANTIATE_LAYER_GPU_FUNCS(YoloCoOccurrenceLayer); } // namespace caffe
d1719fd5b80dcaec7ac8b7d8fcc6149d36182a53.cu
#include <algorithm> #include <vector> #include <cfloat> #include "cuda_runtime.h" #include "curand.h" #include "cuda.h" #include "caffe/layers/yolo_co_layer.hpp" #include "caffe/region_common.hpp" namespace caffe { template <typename Dtype> __global__ void yolo_co_kernel( int outer_num, int inner_num, int co_classes, int channels, int classes, int max_gt, const int* comap_class_data, const int* comap_offset_data, const int* comap_size_data, const int* comap_data, const float* comap_thresh_data, const float* comap_obj_thresh_data, const float* comap_ixr_data, const Dtype* pred_data, const Dtype* bbs_data, const Dtype* truth_data, const Dtype* obj_data, Dtype* target_no_obj_data) { CUDA_KERNEL_LOOP(index, max_gt * outer_num * co_classes * inner_num) { const int s = index % inner_num; auto t = index / inner_num; const int cidx = t % co_classes; t /= co_classes; const int n = t % outer_num; t /= outer_num; auto obj_index = n * inner_num + s; // If this is a ground-truth already, nothing to do if (target_no_obj_data[obj_index] > 0) continue; auto offset_nt = n * 5 * max_gt + t * 5; Dtype tx = *(truth_data + offset_nt + 0); // If no ground-truth at this index if (!tx) continue; Dtype ty = *(truth_data + offset_nt + 1); Dtype tw = *(truth_data + offset_nt + 2); Dtype th = *(truth_data + offset_nt + 3); int cls = *(truth_data + offset_nt + 4); // Ground-truth class // we explicitly ignore this zero-length bounding boxes if (tw <= 0.00001 || th <= 0.00001) continue; int bbs_index = obj_index * 4; Dtype px = *(bbs_data + bbs_index + 0); Dtype py = *(bbs_data + bbs_index + 1); Dtype pw = *(bbs_data + bbs_index + 2); Dtype ph = *(bbs_data + bbs_index + 3); // Same as ground-truth logic: // we explicitly ignore this zero-length bounding boxes if (pw <= 0.00001 || ph <= 0.00001) continue; auto size = comap_size_data[cidx]; auto offset = comap_offset_data[cidx]; for (int i = 0; i < size; ++i) { auto co = comap_data[offset + i]; // class that c may co-occur with if (co != cls) 
continue; // c may co-occure with co only in one rule, so after this the loop will end auto obj_thresh = comap_obj_thresh_data[offset + i]; auto offset_pred = n * channels * inner_num + s; bool with_objectness = (channels == classes + 1); Dtype objectness; if (with_objectness) objectness = pred_data[offset_pred + classes * inner_num]; else objectness = obj_data[obj_index]; if (objectness < obj_thresh) break; auto c = comap_class_data[cidx]; auto conf = pred_data[offset_pred + c * inner_num]; auto thresh = comap_thresh_data[offset + i]; if (conf < thresh) break; // Check intersection with co-occured class auto ixr_thresh = comap_ixr_data[offset + i]; auto ix = TBoxIntersection(px, py, pw, ph, tx, ty, tw, th); ix /= (pw * ph); // intersection ratio if (ix >= ixr_thresh) target_no_obj_data[obj_index] = obj_data[obj_index]; break; } } } template <typename Dtype> void YoloCoOccurrenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int blob_idx = 0; auto blob_obj = bottom[blob_idx++]; auto blob_no_obj = bottom[blob_idx++]; auto blob_truth = bottom[blob_idx++]; auto blob_bbs = bottom[blob_idx++]; auto blob_pred = bottom[blob_idx++]; auto target_no_obj = top[0]; caffe_copy(blob_no_obj->count(), blob_no_obj->gpu_data(), target_no_obj->mutable_gpu_data()); auto co_classes = comap_class_.count(); if (!co_classes) return; yolo_co_kernel << <CAFFE_GET_BLOCKS(max_gt_ * outer_num_ * co_classes * inner_num_), CAFFE_CUDA_NUM_THREADS >> > (outer_num_, inner_num_, co_classes, channels_, classes_, max_gt_, comap_class_.gpu_data(), comap_offset_.gpu_data(), comap_size_.gpu_data(), comap_.gpu_data(), comap_thresh_.gpu_data(), comap_obj_thresh_.gpu_data(), comap_ixr_.gpu_data(), blob_pred->gpu_data(), blob_bbs->gpu_data(), blob_truth->gpu_data(), blob_obj->gpu_data(), target_no_obj->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(YoloCoOccurrenceLayer); } // namespace caffe
1e65cbb49573fe9da6b1a649f07d050a3897a475.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/compress_inner_product_layer.hpp" #include <cmath> #include <vector> #include <fstream> #include <sstream> namespace caffe { template <typename Dtype> __global__ void CCMaskApply(const int n, const Dtype* wb, const Dtype* mask, Dtype* wb_t) { CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; } } template <typename Dtype> void CInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); const Dtype* bias = NULL; if (this->bias_term_) { bias = this->blobs_[1]->mutable_gpu_data(); } if (this->phase_ == TRAIN){ if (this->iter_ % this->inter_iter_ == 0 && (this->iter_) < (this->iter_stop_) && this->is_pruning_){ Dtype* weightCPU = this->blobs_[0]->mutable_cpu_data(); Dtype* weightMaskCPU = this->blobs_[this->blob_num_]->mutable_cpu_data(); // compute the weight mask based on the inter_inter Dtype sparsity_ratio = this->bound_weight_ * log(2 + (this->iter_ / this->inter_iter_)); // compute the mask caffe_set(this->blobs_[this->blob_num_]->count(), (Dtype)1.0, weightMaskCPU); vector<std::pair <Dtype, size_t> > param_temp; for (size_t i = 0; i < this->blobs_[this->blob_num_]->count(); i++) param_temp.push_back(std::make_pair(fabs(weightCPU[i]), i)); std::sort(param_temp.begin(), param_temp.end(), sortPairAscend); for (size_t i = 0; i < this->blobs_[0]->count() * sparsity_ratio; i++) weightMaskCPU[param_temp[i].second] = 0.0; LOG(INFO) << sparsity_ratio << " " << param_temp[this->blobs_[0]->count()*sparsity_ratio].first \ << " " << param_temp[this->blobs_[0]->count() - 1].first; /* record mask into file std::ofstream outfile; 
outfile.open(this->name_.c_str(), std::ofstream::app); for (size_t i = 0; i < this->blobs_[this->blob_num_]->count(); i++){ outfile << weightMaskCPU[i] << " "; } outfile << "\n"; outfile.close();*/ } } const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weightMask = this->blobs_[this->blob_num_]->mutable_gpu_data(); // Calculate the current (masked) weight and bias hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, weightTmp); CUDA_POST_KERNEL_CHECK; // Forward calculation with (masked) weight and bias const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weightTmp, bottom_data, (Dtype)0., top_data); if (this->bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], bias, top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weightTmp, (Dtype)0., top_data); if (this->bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), bias, (Dtype)1., top_data); } } template <typename Dtype> void CInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); if (this->param_propagate_down_[0]) { const Dtype* weightMask = this->blobs_[this->blob_num_]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., weight_diff); //Gradient with respect to weight hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[this->blob_num_]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 
this->blobs_[this->blob_num_]->count(), weight_diff, weightMask, weight_diff); CUDA_POST_KERNEL_CHECK; } if (bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1.,bias_diff); } if (propagate_down[0]) { const Dtype* weightTmp = this->weight_tmp_.gpu_data(); // Gradient with respect to bottom data caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, weightTmp, (Dtype)0., bottom[0]->mutable_gpu_diff()); } } INSTANTIATE_LAYER_GPU_FUNCS(CInnerProductLayer); } // namespace caffe
1e65cbb49573fe9da6b1a649f07d050a3897a475.cu
#include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/compress_inner_product_layer.hpp" #include <cmath> #include <vector> #include <fstream> #include <sstream> namespace caffe { template <typename Dtype> __global__ void CCMaskApply(const int n, const Dtype* wb, const Dtype* mask, Dtype* wb_t) { CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; } } template <typename Dtype> void CInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); const Dtype* bias = NULL; if (this->bias_term_) { bias = this->blobs_[1]->mutable_gpu_data(); } if (this->phase_ == TRAIN){ if (this->iter_ % this->inter_iter_ == 0 && (this->iter_) < (this->iter_stop_) && this->is_pruning_){ Dtype* weightCPU = this->blobs_[0]->mutable_cpu_data(); Dtype* weightMaskCPU = this->blobs_[this->blob_num_]->mutable_cpu_data(); // compute the weight mask based on the inter_inter Dtype sparsity_ratio = this->bound_weight_ * log(2 + (this->iter_ / this->inter_iter_)); // compute the mask caffe_set(this->blobs_[this->blob_num_]->count(), (Dtype)1.0, weightMaskCPU); vector<std::pair <Dtype, size_t> > param_temp; for (size_t i = 0; i < this->blobs_[this->blob_num_]->count(); i++) param_temp.push_back(std::make_pair(fabs(weightCPU[i]), i)); std::sort(param_temp.begin(), param_temp.end(), sortPairAscend); for (size_t i = 0; i < this->blobs_[0]->count() * sparsity_ratio; i++) weightMaskCPU[param_temp[i].second] = 0.0; LOG(INFO) << sparsity_ratio << " " << param_temp[this->blobs_[0]->count()*sparsity_ratio].first \ << " " << param_temp[this->blobs_[0]->count() - 1].first; /* record mask into file std::ofstream outfile; outfile.open(this->name_.c_str(), std::ofstream::app); for (size_t i = 0; i < 
this->blobs_[this->blob_num_]->count(); i++){ outfile << weightMaskCPU[i] << " "; } outfile << "\n"; outfile.close();*/ } } const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weightMask = this->blobs_[this->blob_num_]->mutable_gpu_data(); // Calculate the current (masked) weight and bias CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp); CUDA_POST_KERNEL_CHECK; // Forward calculation with (masked) weight and bias const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weightTmp, bottom_data, (Dtype)0., top_data); if (this->bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], bias, top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weightTmp, (Dtype)0., top_data); if (this->bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), bias, (Dtype)1., top_data); } } template <typename Dtype> void CInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); if (this->param_propagate_down_[0]) { const Dtype* weightMask = this->blobs_[this->blob_num_]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., weight_diff); //Gradient with respect to weight CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[this->blob_num_]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[this->blob_num_]->count(), weight_diff, weightMask, weight_diff); CUDA_POST_KERNEL_CHECK; } if (bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = 
this->blobs_[1]->mutable_gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1.,bias_diff); } if (propagate_down[0]) { const Dtype* weightTmp = this->weight_tmp_.gpu_data(); // Gradient with respect to bottom data caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, weightTmp, (Dtype)0., bottom[0]->mutable_gpu_diff()); } } INSTANTIATE_LAYER_GPU_FUNCS(CInnerProductLayer); } // namespace caffe
3e162536a5e09e73ec9e1be438254eec1db55d9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void Predictor (const double TIME, double4 *p_pred, float4 *v_pred, float4 *a_pred, double4 *p_corr, double4 *v_corr, double *loc_time, double4 *acc, double4 *acc1, double4 *acc2, double4 *acc3, int istart, int* nvec, int ppgpus, unsigned int N){ int i = blockIdx.x*blockDim.x + threadIdx.x + istart; int cost = ppgpus+istart; if(i>=cost){ i = nvec[i - cost]; if(i>=istart && i < cost) i=-1; } if(i<0) return; double timestep = TIME - loc_time[i]; double t2 = timestep * timestep; double t3 = t2 * timestep; double t4 = t2 * t2; double t5 = t4 * timestep; t2 *= 0.5; t3 *= 0.1666666666666666666666; t4 *= 0.0416666666666666666666; t5 *= 0.0083333333333333333333; double4 myppred; myppred.x = p_pred[i].x; myppred.y = p_pred[i].y; myppred.z = p_pred[i].z; float4 mypred; mypred.x = v_pred[i].x; mypred.y = v_pred[i].y; mypred.z = v_pred[i].z; double4 mypcorr; mypcorr.x = p_corr[i].x; mypcorr.y = p_corr[i].y; mypcorr.z = p_corr[i].z; double4 myvcorr; myvcorr.x = v_corr[i].x; myvcorr.y = v_corr[i].y; myvcorr.z = v_corr[i].z; double4 myacc; myacc.x = acc[i].x; myacc.y = acc[i].y; myacc.z = acc[i].z; double4 myacc1; myacc1.x = acc1[i].x; myacc1.y = acc1[i].y; myacc1.z = acc1[i].z; double4 myacc2; myacc2.x = acc2[i].x; myacc2.y = acc2[i].y; myacc2.z = acc2[i].z; double4 myacc3; myacc3.x = acc3[i].x; myacc3.y = acc3[i].y; myacc3.z = acc3[i].z; myppred.x = mypcorr.x + timestep * myvcorr.x + t2 * myacc.x + t3 * myacc1.x + t4 * myacc2.x + t5 * myacc3.x ; myppred.y = mypcorr.y + timestep * myvcorr.y + t2 * myacc.y + t3 * myacc1.y + t4 * myacc2.y + t5 * myacc3.y ; myppred.z = mypcorr.z + timestep * myvcorr.z + t2 * myacc.z + t3 * myacc1.z + t4 * myacc2.z + t5 * myacc3.z ; p_pred[i].x = myppred.x; p_pred[i].y = myppred.y; p_pred[i].z = myppred.z; mypred.x = myvcorr.x + timestep * myacc.x + t2 * myacc1.x + t3 * myacc2.x + t4 * myacc3.x ; mypred.y = myvcorr.y + timestep * 
myacc.y + t2 * myacc1.y + t3 * myacc2.y + t4 * myacc3.y ; mypred.z = myvcorr.z + timestep * myacc.z + t2 * myacc1.z + t3 * myacc2.z + t4 * myacc3.z ; v_pred[i].x = mypred.x; v_pred[i].y = mypred.y; v_pred[i].z = mypred.z; mypred.x = myacc.x + timestep * myacc1.x + t2 * myacc2.x + t3 * myacc3.x ; mypred.y = myacc.y + timestep * myacc1.y + t2 * myacc2.y + t3 * myacc3.y ; mypred.z = myacc.z + timestep * myacc1.z + t2 * myacc2.z + t3 * myacc3.z ; a_pred[i].x = mypred.x; a_pred[i].y = mypred.y; a_pred[i].z = mypred.z; }
3e162536a5e09e73ec9e1be438254eec1db55d9f.cu
#include "includes.h" __global__ void Predictor (const double TIME, double4 *p_pred, float4 *v_pred, float4 *a_pred, double4 *p_corr, double4 *v_corr, double *loc_time, double4 *acc, double4 *acc1, double4 *acc2, double4 *acc3, int istart, int* nvec, int ppgpus, unsigned int N){ int i = blockIdx.x*blockDim.x + threadIdx.x + istart; int cost = ppgpus+istart; if(i>=cost){ i = nvec[i - cost]; if(i>=istart && i < cost) i=-1; } if(i<0) return; double timestep = TIME - loc_time[i]; double t2 = timestep * timestep; double t3 = t2 * timestep; double t4 = t2 * t2; double t5 = t4 * timestep; t2 *= 0.5; t3 *= 0.1666666666666666666666; t4 *= 0.0416666666666666666666; t5 *= 0.0083333333333333333333; double4 myppred; myppred.x = p_pred[i].x; myppred.y = p_pred[i].y; myppred.z = p_pred[i].z; float4 mypred; mypred.x = v_pred[i].x; mypred.y = v_pred[i].y; mypred.z = v_pred[i].z; double4 mypcorr; mypcorr.x = p_corr[i].x; mypcorr.y = p_corr[i].y; mypcorr.z = p_corr[i].z; double4 myvcorr; myvcorr.x = v_corr[i].x; myvcorr.y = v_corr[i].y; myvcorr.z = v_corr[i].z; double4 myacc; myacc.x = acc[i].x; myacc.y = acc[i].y; myacc.z = acc[i].z; double4 myacc1; myacc1.x = acc1[i].x; myacc1.y = acc1[i].y; myacc1.z = acc1[i].z; double4 myacc2; myacc2.x = acc2[i].x; myacc2.y = acc2[i].y; myacc2.z = acc2[i].z; double4 myacc3; myacc3.x = acc3[i].x; myacc3.y = acc3[i].y; myacc3.z = acc3[i].z; myppred.x = mypcorr.x + timestep * myvcorr.x + t2 * myacc.x + t3 * myacc1.x + t4 * myacc2.x + t5 * myacc3.x ; myppred.y = mypcorr.y + timestep * myvcorr.y + t2 * myacc.y + t3 * myacc1.y + t4 * myacc2.y + t5 * myacc3.y ; myppred.z = mypcorr.z + timestep * myvcorr.z + t2 * myacc.z + t3 * myacc1.z + t4 * myacc2.z + t5 * myacc3.z ; p_pred[i].x = myppred.x; p_pred[i].y = myppred.y; p_pred[i].z = myppred.z; mypred.x = myvcorr.x + timestep * myacc.x + t2 * myacc1.x + t3 * myacc2.x + t4 * myacc3.x ; mypred.y = myvcorr.y + timestep * myacc.y + t2 * myacc1.y + t3 * myacc2.y + t4 * myacc3.y ; mypred.z = myvcorr.z + 
timestep * myacc.z + t2 * myacc1.z + t3 * myacc2.z + t4 * myacc3.z ; v_pred[i].x = mypred.x; v_pred[i].y = mypred.y; v_pred[i].z = mypred.z; mypred.x = myacc.x + timestep * myacc1.x + t2 * myacc2.x + t3 * myacc3.x ; mypred.y = myacc.y + timestep * myacc1.y + t2 * myacc2.y + t3 * myacc3.y ; mypred.z = myacc.z + timestep * myacc1.z + t2 * myacc2.z + t3 * myacc3.z ; a_pred[i].x = mypred.x; a_pred[i].y = mypred.y; a_pred[i].z = mypred.z; }
0cb0979e2f3477a89cb588a5e2c08c4b1f6a0736.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--blockDim=32 --gridDim=2 __global__ void foo() { __threadfence(); }
0cb0979e2f3477a89cb588a5e2c08c4b1f6a0736.cu
//pass //--blockDim=32 --gridDim=2 __global__ void foo() { __threadfence(); }
e1d2ddd8978b4530b0a5bf2617f72cdda258da55.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_bicgstab.h" #include "bluebottle.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include "shigan.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include "entrySearch.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include <cusp/dia_matrix.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> void shigan_max(int size, real *_stress) { //int size=dom[dev].Gfx.s3b; real *_max_u; real *max_u=(real*) malloc(size * sizeof(real)); checkCudaErrors(hipMalloc((void**) &_max_u, sizeof(real) * size)); checkCudaErrors(hipMemcpy(_max_u,_stress, size*sizeof(real),hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(max_u,_max_u, size*sizeof(real),hipMemcpyDeviceToHost)); //for(int ii=0;ii<size;ii++) printf(" %f ",max_u[ii]); //printf("Array_previous %f %f %f %f\n",max_u[0],max_u[1],max_u[2],max_u[3]); //fflush(stdout); //could change this line to find max or min!! real maxValue=find_max(size,_max_u); printf("max %f \n",maxValue); fflush(stdout); free(max_u); hipFree(_max_u); } void shigan_min(int size, real *_stress) { //int size=dom[dev].Gfx.s3b; real *_max_u; real *max_u=(real*) malloc(size * sizeof(real)); checkCudaErrors(hipMalloc((void**) &_max_u, sizeof(real) * size)); checkCudaErrors(hipMemcpy(_max_u,_stress, size*sizeof(real),hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(max_u,_max_u, size*sizeof(real),hipMemcpyDeviceToHost)); /* printf("Array_previous %f %f %f %f\n",max_u[0],max_u[1],max_u[2],max_u[3]); fflush(stdout); */ //could change this line to find max or min!! 
real maxValue=find_min(size,_max_u); printf("min %f \n",maxValue); fflush(stdout); free(max_u); hipFree(_max_u); } void cuda_flow_stress() { #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); checkCudaErrors(hipSetDevice(dev + dev_start)); int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; // u-component if(dom[dev].Gfx.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfx.jnb + 2; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) (threads_y-2)); blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) (threads_z-2)); dim3 dimBlocks_u(threads_y, threads_z); dim3 numBlocks_u(blocks_y, blocks_z); //printf("max and min\n"); //printf("p\n"); //shigan_max(dom[dev].Gcc.s3b, _p[dev]); //shigan_min(dom[dev].Gcc.s3b, _p[dev]); // //printf("u\n"); //shigan_max(dom[dev].Gfx.s3b, _u[dev]); //shigan_min(dom[dev].Gfx.s3b, _u[dev]); // //printf("w\n"); //shigan_max(dom[dev].Gfz.s3b, _w[dev]); //shigan_min(dom[dev].Gfz.s3b, _w[dev]); hipLaunchKernelGGL(( stress_u), dim3(numBlocks_u), dim3(dimBlocks_u), 0, 0, rho_f, nu,_u[dev],_p[dev],_p0[dev], _stress_u[dev], _dom[dev],_flag_u[dev],dt,dt0); fflush(stdout); //printf("max and min stress_u\n"); //shigan_max(dom[dev].Gfx.s3b, _stress_u[dev]); //shigan_min(dom[dev].Gfx.s3b, _stress_u[dev]); // v-component if(dom[dev].Gfy.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfy.knb + 2; else threads_z = MAX_THREADS_DIM; if(dom[dev].Gfy.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfy.inb + 2; else threads_x = MAX_THREADS_DIM; blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) (threads_z-2)); blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) (threads_x-2)); dim3 dimBlocks_v(threads_z, threads_x); dim3 numBlocks_v(blocks_z, blocks_x); hipLaunchKernelGGL(( stress_v), dim3(numBlocks_v), dim3(dimBlocks_v), 0, 0, rho_f, 
nu,_v[dev],_p[dev],_p0[dev], _stress_v[dev], _dom[dev],_flag_v[dev],dt,dt0); fflush(stdout); // w-component if(dom[dev].Gfz.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfz.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfz.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) (threads_y-2)); dim3 dimBlocks_w(threads_x, threads_y); dim3 numBlocks_w(blocks_x, blocks_y); hipLaunchKernelGGL(( stress_w), dim3(numBlocks_w), dim3(dimBlocks_w), 0, 0, rho_f, nu,_w[dev],_p[dev],_p0[dev], _stress_w[dev], _dom[dev],_flag_w[dev],dt,dt0); fflush(stdout); } } //extern "C" void cuda_move_points() { // parallelize over CPU threads #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); checkCudaErrors(hipSetDevice(dev + dev_start)); int threads = MAX_THREADS_1D; int blocks = (int)ceil((real) npoints / (real) threads); dim3 dimBlocks(threads); dim3 numBlocks(blocks); if(npoints > 0) { // do collision forcing /** if there are n point_particles in a close group, repeat this n times **/ /* real *forces; checkCudaErrors(hipMalloc((void**) &forces, 3*npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); real *moments; checkCudaErrors(hipMalloc((void**) &moments, 3*npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); real eps = 0.1 * (Dom.dx + Dom.dy + Dom.dz) / 3.; */ real *ug,*vg,*wg;//device pointer of the fluid velocity at the particle position real *lpt_stress_u,*lpt_stress_v,*lpt_stress_w;//device pointer of the fluid velocity at the particle position real *ug0,*vg0,*wg0;//device pointer of the fluid velocity at the particle position for the previous time step real *conv_ug,*conv_vg,*conv_wg;//device pointer of the convective part for fluid velocity at the particle position checkCudaErrors(hipMalloc((void**) &ug, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &vg, 
npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &wg, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); checkCudaErrors(hipMalloc((void**) &lpt_stress_u, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &lpt_stress_v, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &lpt_stress_w, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); checkCudaErrors(hipMalloc((void**) &ug0, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &vg0, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &wg0, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); checkCudaErrors(hipMalloc((void**) &conv_ug, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &conv_vg, npoints*sizeof(real))); checkCudaErrors(hipMalloc((void**) &conv_wg, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); //for(int l = 0; l < 10; l++) { // collision_init<<<numBlocks, dimBlocks>>>(_points[dev], npoints); if(npoints > 0) { /* for(int i = 0; i < npoints; i++) { collision_points<<<numBlocks, dimBlocks>>>(_points[dev], i, _dom[dev], eps, forces, moments, npoints, mu, bc); } spring_points<<<numBlocks, dimBlocks>>>(_points[dev], npoints); collision_walls<<<numBlocks, dimBlocks>>>(_dom[dev], _points[dev], npoints, bc, eps, mu); } */ //bc is bc.uTD etc. Make sure which BC this is. point_struct *test=(point_struct *)malloc(sizeof(point_struct)); //interpolate_point_vel<<<numBlocks, dimBlocks>>>(_u[dev],_v[dev],_w[dev],npoints,rho_f,nu,ug,vg,wg,_points[dev],_dom[dev],dt0,ttime,bc); fflush(stdout); hipLaunchKernelGGL(( interpolate_point_vel_shigan), dim3(numBlocks), dim3(dimBlocks), 0, 0, _u[dev],_v[dev],_w[dev],npoints,rho_f,nu,ug,vg,wg,_points[dev],_dom[dev],dt0,ttime,bc); fflush(stdout); //need further modification for calculating fluid stress!! 
//interpolate_point_vel<<<numBlocks, dimBlocks>>>(_u0[dev],_v0[dev],_w0[dev],npoints,rho_f,nu,ug0,vg0,wg0,_points[dev],_dom[dev],dt0,dt,bc); //interpolate_point_vel<<<numBlocks, dimBlocks>>>(_conv_u[dev],_conv_v[dev],_conv_w[dev],npoints,rho_f,nu,conv_ug,conv_vg,conv_wg,_points[dev],_dom[dev],dt0,dt,bc); // checkCudaErrors(hipMemcpy(test,_points[dev], sizeof(point_struct), hipMemcpyDeviceToHost)); //real C_add=0.5; //real C_add=0; //real C_stress=1; //real C_stress=0; //real C_drag=0; //real C_drag=1; //get lpt_stress //TODO _stress_u is not available near the boundary(set to 0), while lpt_stress can be interpolated on BC if(C_stress>0||C_add>0)hipLaunchKernelGGL(( interpolate_point_vel_shigan), dim3(numBlocks), dim3(dimBlocks), 0, 0, _stress_u[dev],_stress_v[dev],_stress_w[dev],npoints,rho_f,nu,lpt_stress_u,lpt_stress_v,lpt_stress_w,_points[dev],_dom[dev],dt0,dt,bc); //if(C_stress>0||C_add>0) interpolate_point_vel<<<numBlocks, dimBlocks>>>(_stress_u[dev],_stress_v[dev],_stress_w[dev],npoints,rho_f,nu,lpt_stress_u,lpt_stress_v,lpt_stress_w,_points[dev],_dom[dev],dt0,dt,bc); hipLaunchKernelGGL(( drag_points), dim3(numBlocks), dim3(dimBlocks), 0, 0, _points[dev],npoints, ug,vg,wg, lpt_stress_u,lpt_stress_v,lpt_stress_w, rho_f,mu,g,gradP, C_add, C_stress,C_drag); fflush(stdout); hipLaunchKernelGGL(( move_points_b), dim3(numBlocks), dim3(dimBlocks), 0, 0, _dom[dev], _points[dev], npoints, dt, dt0, g, rho_f, ttime, C_add, C_stress,C_drag); fflush(stdout); //printf("w is %f wdot is %f\n",test->w,test->wdot); //printf("w is %f wdot is %f\n",test->x,test->y); //fflush(stdout); /* checkCudaErrors(hipFree(forces)); checkCudaErrors(hipFree(moments)); */ checkCudaErrors(hipFree(ug)); checkCudaErrors(hipFree(vg)); checkCudaErrors(hipFree(wg)); checkCudaErrors(hipFree(lpt_stress_u)); checkCudaErrors(hipFree(lpt_stress_v)); checkCudaErrors(hipFree(lpt_stress_w)); checkCudaErrors(hipFree(ug0)); checkCudaErrors(hipFree(vg0)); checkCudaErrors(hipFree(wg0)); 
checkCudaErrors(hipFree(conv_ug)); checkCudaErrors(hipFree(conv_vg)); checkCudaErrors(hipFree(conv_wg)); } } } } //extern "C" /* void cgns_point_particles(real dtout) { if(npoints > 0) { // create the solution file char fname[FILE_NAME_SIZE]; char fname2[FILE_NAME_SIZE]; char fnameall[FILE_NAME_SIZE]; char fnameall2[FILE_NAME_SIZE]; real tout = ttime; // = rec_point_particle_stepnum_out * dtout; char format[CHAR_BUF_SIZE]; int sigfigs = ceil(log10(1. / dtout)); if(sigfigs < 1) sigfigs = 1; sprintf(format, "%%.%df", sigfigs); sprintf(fname2, "point-%s.cgns", format); sprintf(fnameall2, "%s/output/point-%s.cgns", ROOT_DIR, format); sprintf(fname, fname2, tout); sprintf(fnameall, fnameall2, tout); int fn; int bn; int zn; int en; int sn; int Xn; int Yn; int Zn; int fnr; cg_open(fnameall, CG_MODE_WRITE, &fn); cg_base_write(fn, "Base", 3, 3, &bn); cgsize_t size[3][1]; size[0][0] = npoints; size[1][0] = 0; size[2][0] = 0; cg_zone_write(fn, bn, "Zone0", size[0], Unstructured, &zn); // write point_particle locations real *x = (real *)malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); real *y = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *z = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); cgsize_t *conn = (cgsize_t *) malloc(npoints * sizeof(cgsize_t)); // cpumem (real *)+= npoints * sizeof(int); real *a = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(int); real *u = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *v = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *w = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *udot =(real *) malloc(npoints * sizeof(real)); // cpumem +=(real *) npoints * sizeof(real); real *vdot =(real *) malloc(npoints * sizeof(real)); // cpumem +=(real *) npoints * sizeof(real); real *wdot 
=(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *ox =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *oy =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *oz =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fx =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fy =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fz =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Lx =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Ly =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Lz =(real *) malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); real *iFx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iFy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iFz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hLx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hLy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * 
sizeof(real); real *hLz =(real *) malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); for(int i = 0; i < npoints; i++) { real mass = 4./3.*PI*(points[i].rho-rho_f)*points[i].r*points[i].r*points[i].r; x[i] = points[i].x; y[i] = points[i].y; z[i] = points[i].z; conn[i] = npoints-i; a[i] = points[i].r; u[i] = points[i].u; v[i] = points[i].v; w[i] = points[i].w; udot[i] = points[i].udot; vdot[i] = points[i].vdot; wdot[i] = points[i].wdot; iFx[i] = points[i].iFx; iFy[i] = points[i].iFy; iFz[i] = points[i].iFz; iLx[i] = points[i].iLx; iLy[i] = points[i].iLy; iLz[i] = points[i].iLz; hFx[i] = points[i].Fx; hFy[i] = points[i].Fy; hFz[i] = points[i].Fz; hLx[i] = points[i].Lx; hLy[i] = points[i].Ly; hLz[i] = points[i].Lz; Fx[i] = iFx[i] + hFx[i] + mass*g.x; Fy[i] = iFy[i] + hFy[i] + mass*g.y; Fz[i] = iFz[i] + hFz[i] + mass*g.z; Lx[i] = iLx[i] + hLx[i]; Ly[i] = iLy[i] + hLy[i]; Lz[i] = iLz[i] + hLz[i]; ox[i] = points[i].ox; oy[i] = points[i].oy; oz[i] = points[i].oz; } cg_coord_write(fn, bn, zn, RealDouble, "CoordinateX", x, &Xn); cg_coord_write(fn, bn, zn, RealDouble, "CoordinateY", y, &Yn); cg_coord_write(fn, bn, zn, RealDouble, "CoordinateZ", z, &Zn); cg_section_write(fn, bn, zn, "Elements", NODE, 0, npoints-1, 0, conn, &en); cg_sol_write(fn, bn, zn, "Solution", Vertex, &sn); cg_field_write(fn, bn, zn, sn, RealDouble, "Radius", a, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityX", u, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityY", v, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityZ", w, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationX", udot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationY", vdot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationZ", wdot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityX", ox, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityY", oy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityZ", 
oz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "HydroForceX", hFx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "HydroForceY", hFy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "HydroForceZ", hFz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceX", iFx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceY", iFy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceZ", iFz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceX", Fx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceY", Fy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceZ", Fz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentX", Lx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentY", Ly, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentZ", Lz, &fnr); cg_goto(fn, bn, "Zone_t", zn, "end"); cg_user_data_write("Etc"); cg_goto(fn, bn, "Zone_t", zn, "Etc", 0, "end"); cgsize_t *N =(cgsize_t *) malloc(sizeof(cgsize_t)); N[0] = 1; cg_array_write("Time", RealDouble, 1, N, &ttime); free(N); cg_close(fn); free(x); free(y); free(z); free(conn); free(a); free(u); free(v); free(w); free(udot); free(vdot); free(wdot); free(iFx); free(iFy); free(iFz); free(iLx); free(iLy); free(iLz); free(hFx); free(hFy); free(hFz); free(hLx); free(hLy); free(hLz); free(ox); free(oy); free(oz); free(Fx); free(Fy); free(Fz); free(Lx); free(Ly); free(Lz); } } */ //mask(i1,i2), ww[:]?
e1d2ddd8978b4530b0a5bf2617f72cdda258da55.cu
#include "cuda_bicgstab.h" #include "bluebottle.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include "shigan.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include "entrySearch.h" //contain _u,_v,_w, rho_f,nu,dt0,dt #include <cusp/dia_matrix.h> #include <cuda.h> #include <helper_cuda.h> void shigan_max(int size, real *_stress) { //int size=dom[dev].Gfx.s3b; real *_max_u; real *max_u=(real*) malloc(size * sizeof(real)); checkCudaErrors(cudaMalloc((void**) &_max_u, sizeof(real) * size)); checkCudaErrors(cudaMemcpy(_max_u,_stress, size*sizeof(real),cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_u,_max_u, size*sizeof(real),cudaMemcpyDeviceToHost)); //for(int ii=0;ii<size;ii++) printf(" %f ",max_u[ii]); //printf("Array_previous %f %f %f %f\n",max_u[0],max_u[1],max_u[2],max_u[3]); //fflush(stdout); //could change this line to find max or min!! real maxValue=find_max(size,_max_u); printf("max %f \n",maxValue); fflush(stdout); free(max_u); cudaFree(_max_u); } void shigan_min(int size, real *_stress) { //int size=dom[dev].Gfx.s3b; real *_max_u; real *max_u=(real*) malloc(size * sizeof(real)); checkCudaErrors(cudaMalloc((void**) &_max_u, sizeof(real) * size)); checkCudaErrors(cudaMemcpy(_max_u,_stress, size*sizeof(real),cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(max_u,_max_u, size*sizeof(real),cudaMemcpyDeviceToHost)); /* printf("Array_previous %f %f %f %f\n",max_u[0],max_u[1],max_u[2],max_u[3]); fflush(stdout); */ //could change this line to find max or min!! 
real maxValue=find_min(size,_max_u); printf("min %f \n",maxValue); fflush(stdout); free(max_u); cudaFree(_max_u); } void cuda_flow_stress() { #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); checkCudaErrors(cudaSetDevice(dev + dev_start)); int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; // u-component if(dom[dev].Gfx.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfx.jnb + 2; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) (threads_y-2)); blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) (threads_z-2)); dim3 dimBlocks_u(threads_y, threads_z); dim3 numBlocks_u(blocks_y, blocks_z); //printf("max and min\n"); //printf("p\n"); //shigan_max(dom[dev].Gcc.s3b, _p[dev]); //shigan_min(dom[dev].Gcc.s3b, _p[dev]); // //printf("u\n"); //shigan_max(dom[dev].Gfx.s3b, _u[dev]); //shigan_min(dom[dev].Gfx.s3b, _u[dev]); // //printf("w\n"); //shigan_max(dom[dev].Gfz.s3b, _w[dev]); //shigan_min(dom[dev].Gfz.s3b, _w[dev]); stress_u<<<numBlocks_u, dimBlocks_u>>>(rho_f, nu,_u[dev],_p[dev],_p0[dev], _stress_u[dev], _dom[dev],_flag_u[dev],dt,dt0); fflush(stdout); //printf("max and min stress_u\n"); //shigan_max(dom[dev].Gfx.s3b, _stress_u[dev]); //shigan_min(dom[dev].Gfx.s3b, _stress_u[dev]); // v-component if(dom[dev].Gfy.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfy.knb + 2; else threads_z = MAX_THREADS_DIM; if(dom[dev].Gfy.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfy.inb + 2; else threads_x = MAX_THREADS_DIM; blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) (threads_z-2)); blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) (threads_x-2)); dim3 dimBlocks_v(threads_z, threads_x); dim3 numBlocks_v(blocks_z, blocks_x); stress_v<<<numBlocks_v, dimBlocks_v>>>(rho_f, nu,_v[dev],_p[dev],_p0[dev], _stress_v[dev], _dom[dev],_flag_v[dev],dt,dt0); 
fflush(stdout); // w-component if(dom[dev].Gfz.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfz.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfz.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) (threads_y-2)); dim3 dimBlocks_w(threads_x, threads_y); dim3 numBlocks_w(blocks_x, blocks_y); stress_w<<<numBlocks_w, dimBlocks_w>>>(rho_f, nu,_w[dev],_p[dev],_p0[dev], _stress_w[dev], _dom[dev],_flag_w[dev],dt,dt0); fflush(stdout); } } //extern "C" void cuda_move_points() { // parallelize over CPU threads #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); checkCudaErrors(cudaSetDevice(dev + dev_start)); int threads = MAX_THREADS_1D; int blocks = (int)ceil((real) npoints / (real) threads); dim3 dimBlocks(threads); dim3 numBlocks(blocks); if(npoints > 0) { // do collision forcing /** if there are n point_particles in a close group, repeat this n times **/ /* real *forces; checkCudaErrors(cudaMalloc((void**) &forces, 3*npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); real *moments; checkCudaErrors(cudaMalloc((void**) &moments, 3*npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); real eps = 0.1 * (Dom.dx + Dom.dy + Dom.dz) / 3.; */ real *ug,*vg,*wg;//device pointer of the fluid velocity at the particle position real *lpt_stress_u,*lpt_stress_v,*lpt_stress_w;//device pointer of the fluid velocity at the particle position real *ug0,*vg0,*wg0;//device pointer of the fluid velocity at the particle position for the previous time step real *conv_ug,*conv_vg,*conv_wg;//device pointer of the convective part for fluid velocity at the particle position checkCudaErrors(cudaMalloc((void**) &ug, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &vg, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &wg, npoints*sizeof(real))); gpumem += 3 * npoints * 
sizeof(real); checkCudaErrors(cudaMalloc((void**) &lpt_stress_u, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &lpt_stress_v, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &lpt_stress_w, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); checkCudaErrors(cudaMalloc((void**) &ug0, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &vg0, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &wg0, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); checkCudaErrors(cudaMalloc((void**) &conv_ug, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &conv_vg, npoints*sizeof(real))); checkCudaErrors(cudaMalloc((void**) &conv_wg, npoints*sizeof(real))); gpumem += 3 * npoints * sizeof(real); //for(int l = 0; l < 10; l++) { // collision_init<<<numBlocks, dimBlocks>>>(_points[dev], npoints); if(npoints > 0) { /* for(int i = 0; i < npoints; i++) { collision_points<<<numBlocks, dimBlocks>>>(_points[dev], i, _dom[dev], eps, forces, moments, npoints, mu, bc); } spring_points<<<numBlocks, dimBlocks>>>(_points[dev], npoints); collision_walls<<<numBlocks, dimBlocks>>>(_dom[dev], _points[dev], npoints, bc, eps, mu); } */ //bc is bc.uTD etc. Make sure which BC this is. point_struct *test=(point_struct *)malloc(sizeof(point_struct)); //interpolate_point_vel<<<numBlocks, dimBlocks>>>(_u[dev],_v[dev],_w[dev],npoints,rho_f,nu,ug,vg,wg,_points[dev],_dom[dev],dt0,ttime,bc); fflush(stdout); interpolate_point_vel_shigan<<<numBlocks, dimBlocks>>>(_u[dev],_v[dev],_w[dev],npoints,rho_f,nu,ug,vg,wg,_points[dev],_dom[dev],dt0,ttime,bc); fflush(stdout); //need further modification for calculating fluid stress!! 
//interpolate_point_vel<<<numBlocks, dimBlocks>>>(_u0[dev],_v0[dev],_w0[dev],npoints,rho_f,nu,ug0,vg0,wg0,_points[dev],_dom[dev],dt0,dt,bc); //interpolate_point_vel<<<numBlocks, dimBlocks>>>(_conv_u[dev],_conv_v[dev],_conv_w[dev],npoints,rho_f,nu,conv_ug,conv_vg,conv_wg,_points[dev],_dom[dev],dt0,dt,bc); // checkCudaErrors(cudaMemcpy(test,_points[dev], sizeof(point_struct), cudaMemcpyDeviceToHost)); //real C_add=0.5; //real C_add=0; //real C_stress=1; //real C_stress=0; //real C_drag=0; //real C_drag=1; //get lpt_stress //TODO _stress_u is not available near the boundary(set to 0), while lpt_stress can be interpolated on BC if(C_stress>0||C_add>0) interpolate_point_vel_shigan<<<numBlocks, dimBlocks>>>(_stress_u[dev],_stress_v[dev],_stress_w[dev],npoints,rho_f,nu,lpt_stress_u,lpt_stress_v,lpt_stress_w,_points[dev],_dom[dev],dt0,dt,bc); //if(C_stress>0||C_add>0) interpolate_point_vel<<<numBlocks, dimBlocks>>>(_stress_u[dev],_stress_v[dev],_stress_w[dev],npoints,rho_f,nu,lpt_stress_u,lpt_stress_v,lpt_stress_w,_points[dev],_dom[dev],dt0,dt,bc); drag_points<<<numBlocks, dimBlocks>>>(_points[dev],npoints, ug,vg,wg, lpt_stress_u,lpt_stress_v,lpt_stress_w, rho_f,mu,g,gradP, C_add, C_stress,C_drag); fflush(stdout); move_points_b<<<numBlocks, dimBlocks>>>(_dom[dev], _points[dev], npoints, dt, dt0, g, rho_f, ttime, C_add, C_stress,C_drag); fflush(stdout); //printf("w is %f wdot is %f\n",test->w,test->wdot); //printf("w is %f wdot is %f\n",test->x,test->y); //fflush(stdout); /* checkCudaErrors(cudaFree(forces)); checkCudaErrors(cudaFree(moments)); */ checkCudaErrors(cudaFree(ug)); checkCudaErrors(cudaFree(vg)); checkCudaErrors(cudaFree(wg)); checkCudaErrors(cudaFree(lpt_stress_u)); checkCudaErrors(cudaFree(lpt_stress_v)); checkCudaErrors(cudaFree(lpt_stress_w)); checkCudaErrors(cudaFree(ug0)); checkCudaErrors(cudaFree(vg0)); checkCudaErrors(cudaFree(wg0)); checkCudaErrors(cudaFree(conv_ug)); checkCudaErrors(cudaFree(conv_vg)); checkCudaErrors(cudaFree(conv_wg)); } } } } 
//extern "C" /* void cgns_point_particles(real dtout) { if(npoints > 0) { // create the solution file char fname[FILE_NAME_SIZE]; char fname2[FILE_NAME_SIZE]; char fnameall[FILE_NAME_SIZE]; char fnameall2[FILE_NAME_SIZE]; real tout = ttime; // = rec_point_particle_stepnum_out * dtout; char format[CHAR_BUF_SIZE]; int sigfigs = ceil(log10(1. / dtout)); if(sigfigs < 1) sigfigs = 1; sprintf(format, "%%.%df", sigfigs); sprintf(fname2, "point-%s.cgns", format); sprintf(fnameall2, "%s/output/point-%s.cgns", ROOT_DIR, format); sprintf(fname, fname2, tout); sprintf(fnameall, fnameall2, tout); int fn; int bn; int zn; int en; int sn; int Xn; int Yn; int Zn; int fnr; cg_open(fnameall, CG_MODE_WRITE, &fn); cg_base_write(fn, "Base", 3, 3, &bn); cgsize_t size[3][1]; size[0][0] = npoints; size[1][0] = 0; size[2][0] = 0; cg_zone_write(fn, bn, "Zone0", size[0], Unstructured, &zn); // write point_particle locations real *x = (real *)malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); real *y = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *z = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); cgsize_t *conn = (cgsize_t *) malloc(npoints * sizeof(cgsize_t)); // cpumem (real *)+= npoints * sizeof(int); real *a = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(int); real *u = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *v = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *w = (real *)malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *udot =(real *) malloc(npoints * sizeof(real)); // cpumem +=(real *) npoints * sizeof(real); real *vdot =(real *) malloc(npoints * sizeof(real)); // cpumem +=(real *) npoints * sizeof(real); real *wdot =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *ox =(real *) 
malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *oy =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *oz =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fx =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fy =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Fz =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Lx =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Ly =(real *) malloc(npoints * sizeof(real)); // cpumem (real *)+= npoints * sizeof(real); real *Lz =(real *) malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); real *iFx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iFy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iFz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *iLz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hFz =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hLx =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hLy =(real *) malloc(npoints * sizeof(real)); // cpumem +(real *)= npoints * sizeof(real); real *hLz =(real *) malloc(npoints * sizeof(real)); // cpumem += npoints * sizeof(real); 
for(int i = 0; i < npoints; i++) { real mass = 4./3.*PI*(points[i].rho-rho_f)*points[i].r*points[i].r*points[i].r; x[i] = points[i].x; y[i] = points[i].y; z[i] = points[i].z; conn[i] = npoints-i; a[i] = points[i].r; u[i] = points[i].u; v[i] = points[i].v; w[i] = points[i].w; udot[i] = points[i].udot; vdot[i] = points[i].vdot; wdot[i] = points[i].wdot; iFx[i] = points[i].iFx; iFy[i] = points[i].iFy; iFz[i] = points[i].iFz; iLx[i] = points[i].iLx; iLy[i] = points[i].iLy; iLz[i] = points[i].iLz; hFx[i] = points[i].Fx; hFy[i] = points[i].Fy; hFz[i] = points[i].Fz; hLx[i] = points[i].Lx; hLy[i] = points[i].Ly; hLz[i] = points[i].Lz; Fx[i] = iFx[i] + hFx[i] + mass*g.x; Fy[i] = iFy[i] + hFy[i] + mass*g.y; Fz[i] = iFz[i] + hFz[i] + mass*g.z; Lx[i] = iLx[i] + hLx[i]; Ly[i] = iLy[i] + hLy[i]; Lz[i] = iLz[i] + hLz[i]; ox[i] = points[i].ox; oy[i] = points[i].oy; oz[i] = points[i].oz; } cg_coord_write(fn, bn, zn, RealDouble, "CoordinateX", x, &Xn); cg_coord_write(fn, bn, zn, RealDouble, "CoordinateY", y, &Yn); cg_coord_write(fn, bn, zn, RealDouble, "CoordinateZ", z, &Zn); cg_section_write(fn, bn, zn, "Elements", NODE, 0, npoints-1, 0, conn, &en); cg_sol_write(fn, bn, zn, "Solution", Vertex, &sn); cg_field_write(fn, bn, zn, sn, RealDouble, "Radius", a, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityX", u, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityY", v, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "VelocityZ", w, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationX", udot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationY", vdot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AccelerationZ", wdot, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityX", ox, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityY", oy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "AngularVelocityZ", oz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "HydroForceX", hFx, &fnr); cg_field_write(fn, bn, 
zn, sn, RealDouble, "HydroForceY", hFy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "HydroForceZ", hFz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceX", iFx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceY", iFy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "InteractionForceZ", iFz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceX", Fx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceY", Fy, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "TotalForceZ", Fz, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentX", Lx, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentY", Ly, &fnr); cg_field_write(fn, bn, zn, sn, RealDouble, "MomentZ", Lz, &fnr); cg_goto(fn, bn, "Zone_t", zn, "end"); cg_user_data_write("Etc"); cg_goto(fn, bn, "Zone_t", zn, "Etc", 0, "end"); cgsize_t *N =(cgsize_t *) malloc(sizeof(cgsize_t)); N[0] = 1; cg_array_write("Time", RealDouble, 1, N, &ttime); free(N); cg_close(fn); free(x); free(y); free(z); free(conn); free(a); free(u); free(v); free(w); free(udot); free(vdot); free(wdot); free(iFx); free(iFy); free(iFz); free(iLx); free(iLy); free(iLz); free(hFx); free(hFy); free(hFz); free(hLx); free(hLy); free(hLz); free(ox); free(oy); free(oz); free(Fx); free(Fy); free(Fz); free(Lx); free(Ly); free(Lz); } } */ //mask(i1,i2), 如何表示ww[:]?
c03ab8fc191bd42d590ab9bdc35efd951b64dfe5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/sort.h> #include <stdio.h> #include <iostream> #include <vector> #include "cuda_nms.h" #define MIN_THREADS_PER_BLOCK 128 using namespace std; namespace NAMESPACE { inline int GetMaxOccupacy(int SMs, int processNum) { int threshold = processNum / SMs; int thread = MIN_THREADS_PER_BLOCK; while(thread < threshold) thread = thread << 1; thread = thread >> 1; thread = thread > 512 ? 512 : thread; return thread; } // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p){ //params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 
+ MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point 
box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++){ rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box2d(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float 
*box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __device__ inline float iou_normal(float const * const a, float const * const b) { //params: a: [x, y, dx, dy] //params: b: [x, y, dx, dy] float left = fmaxf(a[0] - a[2] / 2, b[0] - b[2] / 2), right = fminf(a[0] + a[2] / 2, b[0] + b[2] / 2); float top = fmaxf(a[1] - a[3] / 2, b[1] - b[3] / 2), bottom = fminf(a[1] + a[3] / 2, b[1] + b[3] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[2]; float Sb = b[3] * b[2]; return interS / fmaxf(Sa + Sb - interS, EPS); } extern "C" __global__ void squeeze_for_score_kernel(const float *cls_rw, float *score, int *cls_index, int *range_index_rw, int* counter, int num_cls, int num_box, float score_thresh) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ int dCounter; for(int i = idx; i < num_box; i += stride) { if(threadIdx.x == 0) dCounter = 0; __syncthreads(); int cls = 0; float scoreTmp = cls_rw[i]; for(int j = 1; j < num_cls; ++j) { float scoreNow = cls_rw[j * num_box + i]; bool isSmaller = (scoreTmp < scoreNow); scoreTmp = isSmaller ? scoreNow : scoreTmp; cls = isSmaller ? 
j : cls; } bool isValid = (score_thresh < scoreTmp); int pos; if(isValid) pos = atomicAdd(&dCounter, 1); __syncthreads(); if(threadIdx.x == 0) dCounter = atomicAdd(counter, dCounter); __syncthreads(); if(isValid) { pos += dCounter; cls_index [i] = cls; range_index_rw[pos] = i; score [pos] = -scoreTmp; } } } __global__ void copy_to_temp_kernel_bev(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, int num, int original_num, int num_box_info) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < num; i += stride) { int index = range_index_rw[i]; cls_temp[i] = cls_index[index]; #pragma unroll 7 for (int j = 0; j < 7; ++j) box_temp[j * num + i] = box_s_rw[j * original_num + index]; } } __global__ void copy_to_temp_kernel_nor(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, int num, int original_num, int num_box_info) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < num; i += stride) { int index = range_index_rw[i]; cls_temp[i] = cls_index[index]; #pragma unroll 4 for (int j = 0; j < 4; ++j) box_temp[j * num + i] = box_s_rw[j * original_num + index]; } } __global__ void iou_self_bev_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) { int row = i / nms_pre_maxsize; //rows int col = i - (row * nms_pre_maxsize); //cols int a_idx = row; int b_idx = col; if(a_idx!=b_idx){ float box_a[7]; float box_b[7]; #pragma unroll 7 for(int j = 0; j < 7; ++j) { box_a[j] = boxes[j * nms_pre_maxsize + a_idx]; box_b[j] = boxes[j * nms_pre_maxsize + b_idx]; } float cur_iou_bev = iou_bev(reinterpret_cast<const float*>(&box_a[0]), 
reinterpret_cast<const float*>(&box_b[0])); ans_iou[row * nms_pre_maxsize + col] = cur_iou_bev; ans_iou[col * nms_pre_maxsize + row] = cur_iou_bev; }else{ ans_iou[row * nms_pre_maxsize + col] = 1; } } } __global__ void iou_self_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou){ // params boxes: (N, 4) [x, y, dx, dy] int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) { int row = i / nms_pre_maxsize; //rows int col = i - (row * nms_pre_maxsize); //cols int a_idx = index[row]; int b_idx = index[col]; if(a_idx!=b_idx){ float box_a[4]; float box_b[4]; #pragma unroll 4 for(int j = 0; j < 4; ++j) { box_a[j] = boxes[j * nms_pre_maxsize + a_idx]; box_b[j] = boxes[j * nms_pre_maxsize + b_idx]; } float cur_iou = iou_normal(reinterpret_cast<const float*>(&box_a[0]), reinterpret_cast<const float*>(&box_b[0])); ans_iou[row * nms_pre_maxsize + col] = cur_iou; ans_iou[col * nms_pre_maxsize + row] = cur_iou; } else{ ans_iou[row * nms_pre_maxsize + col] = 1; } } } __global__ void nms_kernel(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh, int box_idx) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize; i += stride) { if(range_index[box_idx]<0) continue; if(i<=box_idx) continue; if(ans_iou[box_idx * nms_pre_maxsize + i] > nms_thresh) range_index[i] = -1; } } void nms_func(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, nms_kernel, 0, nms_pre_maxsize)); minGridSize = ::min(minGridSize, DivUp(nms_pre_maxsize, blockSize)); for (int i = 0; i < nms_pre_maxsize; ++i) hipLaunchKernelGGL(( 
nms_kernel), dim3(minGridSize), dim3(blockSize), 0, 0, nms_pre_maxsize, range_index, ans_iou, nms_thresh, i); //hipDeviceSynchronize(); } __global__ void concat_outputs_kernel_bev(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < total_box; i += stride) { int index = range_index_rw[i]; #pragma unroll 7 for (int j = 0; j < 7; ++j) dst_s_rw[j * nms_post_maxsize + i] = box_temp[j * orign_num + index]; dst_s_rw[7 * nms_post_maxsize + i] = -score_temp[index];// dst_s_rw[8 * nms_post_maxsize + i] = (float)cls_temp[index]; } } __global__ void concat_outputs_kernel_nor(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < total_box; i += stride) { int index = range_index_rw[i]; #pragma unroll 4 for (int j = 0; j < 4; ++j) dst_s_rw[j * nms_post_maxsize + i] = box_temp[j * orign_num + index]; dst_s_rw[4 * nms_post_maxsize + i] = -score_temp[index];// dst_s_rw[5 * nms_post_maxsize + i] = (float)cls_temp[index]; } } __global__ void range_kernel(int *index, int num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < num; i += stride) { index[i] = i; } } struct is_neg { __host__ __device__ bool operator()(const int x) { return x < 0; } }; void cuda_nms(const float *batch_box, const float *batch_cls_rw, float *score, int *cls_index, int *range_index_rw, int* pos_rw, int *cls_temp, float *box_temp, float *ious_rw, float *dst, int num_box, int num_cls, int nms_pre_maxsize, int nms_post_maxsize, float nms_thresh, int batch_size, float score_thresh, int use_bev) { int SMs = 0; checkCudaErrors(hipDeviceGetAttribute(&SMs, 
hipDeviceAttributeMultiprocessorCount, 0)); int num_box_info = use_bev == 0 ? 4 : 7; for (int i = 0; i < batch_size; ++i){ const float *cls_s_rw = batch_cls_rw + i * num_box * num_cls; const float *box_s_rw = batch_box + i * num_box * num_box_info; float *dst_s_rw = dst + i * nms_post_maxsize * (num_box_info + 2); int *pos_s_rw = pos_rw + i; //boxscorescoreboxbox-1 hipLaunchKernelGGL(( squeeze_for_score_kernel), dim3(SMs), dim3(GetMaxOccupacy(SMs, num_box)), sizeof(int), 0, cls_s_rw, score, cls_index, range_index_rw, pos_s_rw, num_cls, num_box, score_thresh); //clsscore int num; checkCudaErrors(hipMemcpy(&num, pos_s_rw, sizeof(int), hipMemcpyDeviceToHost)); if(num < 1) continue; //box,scorerange_index_rw thrust::stable_sort_by_key(thrust::device, score, score + num, range_index_rw); if(num > nms_pre_maxsize) num = nms_pre_maxsize; if(use_bev != 0) hipLaunchKernelGGL(( copy_to_temp_kernel_bev), dim3(SMs), dim3(GetMaxOccupacy(SMs, num)), 0, 0, range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); else hipLaunchKernelGGL(( copy_to_temp_kernel_nor), dim3(SMs), dim3(GetMaxOccupacy(SMs, num)), 0, 0, range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); if(use_bev != 0) hipLaunchKernelGGL(( iou_self_bev_kernel), dim3(SMs), dim3(GetMaxOccupacy(SMs, num * num)), 0, 0, num, box_temp, range_index_rw, ious_rw); else hipLaunchKernelGGL(( iou_self_kernel) , dim3(SMs), dim3(GetMaxOccupacy(SMs, num * num)), 0, 0, num, box_temp, range_index_rw, ious_rw); hipLaunchKernelGGL(( range_kernel), dim3(SMs), dim3(GetMaxOccupacy(SMs, num)), 0, 0, range_index_rw, num);// mark temp index nms_func(num, range_index_rw, ious_rw, nms_thresh); //-1boxbox int *new_end = thrust::remove_if(thrust::device, range_index_rw, range_index_rw + num, is_neg()); int valid_num = new_end - range_index_rw; if(valid_num < 1) continue; valid_num = valid_num > nms_post_maxsize ? 
nms_post_maxsize : valid_num; if(use_bev != 0) hipLaunchKernelGGL(( concat_outputs_kernel_bev), dim3(SMs), dim3(GetMaxOccupacy(SMs, valid_num)), 0, 0, box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); else hipLaunchKernelGGL(( concat_outputs_kernel_nor), dim3(SMs), dim3(GetMaxOccupacy(SMs, valid_num)), 0, 0, box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); } checkCudaErrors(hipDeviceSynchronize()); } }//namespace
c03ab8fc191bd42d590ab9bdc35efd951b64dfe5.cu
#include <thrust/sort.h> #include <stdio.h> #include <iostream> #include <vector> #include "cuda_nms.h" #define MIN_THREADS_PER_BLOCK 128 using namespace std; namespace NAMESPACE { inline int GetMaxOccupacy(int SMs, int processNum) { int threshold = processNum / SMs; int thread = MIN_THREADS_PER_BLOCK; while(thread < threshold) thread = thread << 1; thread = thread >> 1; thread = thread > 512 ? 512 : thread; return thread; } // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p){ //params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const 
Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); 
box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++){ rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box2d(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, 
dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __device__ inline float iou_normal(float const * const a, float const * const b) { //params: a: [x, y, dx, dy] //params: b: [x, y, dx, dy] float left = fmaxf(a[0] - a[2] / 2, b[0] - b[2] / 2), right = fminf(a[0] + a[2] / 2, b[0] + b[2] / 2); float top = fmaxf(a[1] - a[3] / 2, b[1] - b[3] / 2), bottom = fminf(a[1] + a[3] / 2, b[1] + b[3] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[2]; float Sb = b[3] * b[2]; return interS / fmaxf(Sa + Sb - interS, EPS); } extern "C" __global__ void squeeze_for_score_kernel(const float *cls_rw, float *score, int *cls_index, int *range_index_rw, int* counter, int num_cls, int num_box, float score_thresh) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ int dCounter; for(int i = idx; i < num_box; i += stride) { if(threadIdx.x == 0) dCounter = 0; __syncthreads(); int cls = 0; float scoreTmp = cls_rw[i]; for(int j = 1; j < num_cls; ++j) { float scoreNow = cls_rw[j * num_box + i]; bool isSmaller = (scoreTmp < scoreNow); scoreTmp = isSmaller ? scoreNow : scoreTmp; cls = isSmaller ? 
j : cls; } bool isValid = (score_thresh < scoreTmp); int pos; if(isValid) pos = atomicAdd(&dCounter, 1); __syncthreads(); if(threadIdx.x == 0) dCounter = atomicAdd(counter, dCounter); __syncthreads(); if(isValid) { pos += dCounter; cls_index [i] = cls; range_index_rw[pos] = i; score [pos] = -scoreTmp; } } } __global__ void copy_to_temp_kernel_bev(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, int num, int original_num, int num_box_info) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < num; i += stride) { int index = range_index_rw[i]; cls_temp[i] = cls_index[index]; #pragma unroll 7 for (int j = 0; j < 7; ++j) box_temp[j * num + i] = box_s_rw[j * original_num + index]; } } __global__ void copy_to_temp_kernel_nor(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, int num, int original_num, int num_box_info) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < num; i += stride) { int index = range_index_rw[i]; cls_temp[i] = cls_index[index]; #pragma unroll 4 for (int j = 0; j < 4; ++j) box_temp[j * num + i] = box_s_rw[j * original_num + index]; } } __global__ void iou_self_bev_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) { int row = i / nms_pre_maxsize; //rows int col = i - (row * nms_pre_maxsize); //cols int a_idx = row; int b_idx = col; if(a_idx!=b_idx){ float box_a[7]; float box_b[7]; #pragma unroll 7 for(int j = 0; j < 7; ++j) { box_a[j] = boxes[j * nms_pre_maxsize + a_idx]; box_b[j] = boxes[j * nms_pre_maxsize + b_idx]; } float cur_iou_bev = iou_bev(reinterpret_cast<const float*>(&box_a[0]), 
reinterpret_cast<const float*>(&box_b[0])); ans_iou[row * nms_pre_maxsize + col] = cur_iou_bev; ans_iou[col * nms_pre_maxsize + row] = cur_iou_bev; }else{ ans_iou[row * nms_pre_maxsize + col] = 1; } } } __global__ void iou_self_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou){ // params boxes: (N, 4) [x, y, dx, dy] int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) { int row = i / nms_pre_maxsize; //rows int col = i - (row * nms_pre_maxsize); //cols int a_idx = index[row]; int b_idx = index[col]; if(a_idx!=b_idx){ float box_a[4]; float box_b[4]; #pragma unroll 4 for(int j = 0; j < 4; ++j) { box_a[j] = boxes[j * nms_pre_maxsize + a_idx]; box_b[j] = boxes[j * nms_pre_maxsize + b_idx]; } float cur_iou = iou_normal(reinterpret_cast<const float*>(&box_a[0]), reinterpret_cast<const float*>(&box_b[0])); ans_iou[row * nms_pre_maxsize + col] = cur_iou; ans_iou[col * nms_pre_maxsize + row] = cur_iou; } else{ ans_iou[row * nms_pre_maxsize + col] = 1; } } } __global__ void nms_kernel(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh, int box_idx) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = idx; i < nms_pre_maxsize; i += stride) { if(range_index[box_idx]<0) continue; if(i<=box_idx) continue; if(ans_iou[box_idx * nms_pre_maxsize + i] > nms_thresh) range_index[i] = -1; } } void nms_func(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, nms_kernel, 0, nms_pre_maxsize)); minGridSize = std::min(minGridSize, DivUp(nms_pre_maxsize, blockSize)); for (int i = 0; i < nms_pre_maxsize; ++i) nms_kernel<<<minGridSize, 
blockSize>>>(nms_pre_maxsize, range_index, ans_iou, nms_thresh, i); //cudaDeviceSynchronize(); } __global__ void concat_outputs_kernel_bev(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < total_box; i += stride) { int index = range_index_rw[i]; #pragma unroll 7 for (int j = 0; j < 7; ++j) dst_s_rw[j * nms_post_maxsize + i] = box_temp[j * orign_num + index]; dst_s_rw[7 * nms_post_maxsize + i] = -score_temp[index];//前面排序时存为负值 dst_s_rw[8 * nms_post_maxsize + i] = (float)cls_temp[index]; } } __global__ void concat_outputs_kernel_nor(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < total_box; i += stride) { int index = range_index_rw[i]; #pragma unroll 4 for (int j = 0; j < 4; ++j) dst_s_rw[j * nms_post_maxsize + i] = box_temp[j * orign_num + index]; dst_s_rw[4 * nms_post_maxsize + i] = -score_temp[index];//前面排序时存为负值 dst_s_rw[5 * nms_post_maxsize + i] = (float)cls_temp[index]; } } __global__ void range_kernel(int *index, int num) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < num; i += stride) { index[i] = i; } } struct is_neg { __host__ __device__ bool operator()(const int x) { return x < 0; } }; void cuda_nms(const float *batch_box, const float *batch_cls_rw, float *score, int *cls_index, int *range_index_rw, int* pos_rw, int *cls_temp, float *box_temp, float *ious_rw, float *dst, int num_box, int num_cls, int nms_pre_maxsize, int nms_post_maxsize, float nms_thresh, int batch_size, float score_thresh, int use_bev) { int SMs = 0; checkCudaErrors(cudaDeviceGetAttribute(&SMs, cudaDevAttrMultiProcessorCount, 0)); 
int num_box_info = use_bev == 0 ? 4 : 7; for (int i = 0; i < batch_size; ++i){ const float *cls_s_rw = batch_cls_rw + i * num_box * num_cls; const float *box_s_rw = batch_box + i * num_box * num_box_info; float *dst_s_rw = dst + i * nms_post_maxsize * (num_box_info + 2); int *pos_s_rw = pos_rw + i; //挑选每个box中score最大的类,记录类别索引和score;记录box索引,不满足阈值的box,索引记为-1 squeeze_for_score_kernel<<<SMs, GetMaxOccupacy(SMs, num_box), sizeof(int)>>>(cls_s_rw, score, cls_index, range_index_rw, pos_s_rw, num_cls, num_box, score_thresh); //提取cls信息和score信息 int num; checkCudaErrors(cudaMemcpy(&num, pos_s_rw, sizeof(int), cudaMemcpyDeviceToHost)); if(num < 1) continue; //将有效的box按照置信度排序,注意,此时score,range_index_rw都是排序的结果 thrust::stable_sort_by_key(thrust::device, score, score + num, range_index_rw); if(num > nms_pre_maxsize) num = nms_pre_maxsize; if(use_bev != 0) copy_to_temp_kernel_bev<<<SMs, GetMaxOccupacy(SMs, num)>>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); else copy_to_temp_kernel_nor<<<SMs, GetMaxOccupacy(SMs, num)>>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); if(use_bev != 0) iou_self_bev_kernel<<<SMs, GetMaxOccupacy(SMs, num * num)>>>(num, box_temp, range_index_rw, ious_rw); else iou_self_kernel <<<SMs, GetMaxOccupacy(SMs, num * num)>>>(num, box_temp, range_index_rw, ious_rw); range_kernel<<<SMs, GetMaxOccupacy(SMs, num)>>>(range_index_rw, num);// mark temp index nms_func(num, range_index_rw, ious_rw, nms_thresh); //聚合索引大于-1的box,为有效box int *new_end = thrust::remove_if(thrust::device, range_index_rw, range_index_rw + num, is_neg()); int valid_num = new_end - range_index_rw; if(valid_num < 1) continue; valid_num = valid_num > nms_post_maxsize ? 
nms_post_maxsize : valid_num; if(use_bev != 0) concat_outputs_kernel_bev<<<SMs, GetMaxOccupacy(SMs, valid_num)>>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); else concat_outputs_kernel_nor<<<SMs, GetMaxOccupacy(SMs, valid_num)>>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); } checkCudaErrors(cudaDeviceSynchronize()); } }//namespace
911cd6f9e9e5652a180734ca59cfb10b850cce83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/zhemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include "syhemv_offset_core.cuh" #if(TARGET_SM >= 30) #define zhemv_upper_bs (32) #define zhemv_upper_ty (4) #define zhemv_upper_by (2) #define zhemv_lower_bs (16) #define zhemv_lower_ty (2) #define zhemv_lower_by (2) #else #define zhemv_upper_bs (32) #define zhemv_upper_ty (8) #define zhemv_upper_by (2) #define zhemv_lower_bs (16) #define zhemv_lower_ty (4) #define zhemv_lower_by (2) #endif /*************************************************************************************/ int kblas_zhemv_offset_driver( char uplo, int m, hipDoubleComplex alpha, hipDoubleComplex *dA, int lda, hipDoubleComplex *dX, int incx, hipDoubleComplex beta, hipDoubleComplex *dY, int incy, int offset, hipStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int zhemv_bs = zhemv_upper_bs; const int thread_x = zhemv_bs; const int thread_y = zhemv_upper_ty; const int elements_per_thread = (zhemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % zhemv_bs; int total_blocks_skipped = offset / zhemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * zhemv_bs * lda; dA += total_blocks_skipped * zhemv_bs; dX += 
total_blocks_skipped * zhemv_bs * incx; dY += total_blocks_skipped * zhemv_bs * incy; m -= total_blocks_skipped * zhemv_bs; /** end offset necessary calculation **/ int mod = m % zhemv_bs; int nstripes = m / zhemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, zhemv_upper_by); if(blocks == 0) return 0; if(mod == 0) { hipLaunchKernelGGL(( syhemvu_special_d_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); hipLaunchKernelGGL(( syhemvu_special_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvu_generic_d_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 1:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 2:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 3:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 4:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 5:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 6:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 7:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, 
offset_); break; case 8:hipLaunchKernelGGL(( syhemvu_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; // return error otherwise: default: printf("ZHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int zhemv_bs = zhemv_lower_bs; const int thread_x = zhemv_bs; const int thread_y = zhemv_lower_ty; const int elements_per_thread = (zhemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % zhemv_bs; int total_blocks_skipped = offset / zhemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * zhemv_bs * lda; dA += total_blocks_skipped * zhemv_bs; dX += total_blocks_skipped * zhemv_bs * incx; dY += total_blocks_skipped * zhemv_bs * incy; m -= total_blocks_skipped * zhemv_bs; /** end offset necessary calculation **/ int mod = m % zhemv_bs; int nstripes = m / zhemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, zhemv_lower_by); if(blocks == 0) return 0; if(mod == 0) { hipLaunchKernelGGL(( syhemvl_special_d_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_special_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, 
lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvl_generic_d_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_generic_nd_offset<hipDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_zhemv_offset( char uplo, int m, hipDoubleComplex alpha, hipDoubleComplex *dA, int lda, hipDoubleComplex *dX, int incx, hipDoubleComplex beta, hipDoubleComplex *dY, int incy, int offset) { return kblas_zhemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset); } /*************************************************************************************/ extern "C" int kblas_zhemv_offset_async( char uplo, int m, hipDoubleComplex alpha, hipDoubleComplex *dA, int lda, hipDoubleComplex *dX, int incx, hipDoubleComplex beta, hipDoubleComplex *dY, int incy, int offset, hipStream_t stream) { return kblas_zhemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream); } /*************************************************************************************/
911cd6f9e9e5652a180734ca59cfb10b850cce83.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/zhemv_offset.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include "syhemv_offset_core.cuh" #if(TARGET_SM >= 30) #define zhemv_upper_bs (32) #define zhemv_upper_ty (4) #define zhemv_upper_by (2) #define zhemv_lower_bs (16) #define zhemv_lower_ty (2) #define zhemv_lower_by (2) #else #define zhemv_upper_bs (32) #define zhemv_upper_ty (8) #define zhemv_upper_by (2) #define zhemv_lower_bs (16) #define zhemv_lower_ty (4) #define zhemv_lower_by (2) #endif /*************************************************************************************/ int kblas_zhemv_offset_driver( char uplo, int m, cuDoubleComplex alpha, cuDoubleComplex *dA, int lda, cuDoubleComplex *dX, int incx, cuDoubleComplex beta, cuDoubleComplex *dY, int incy, int offset, cudaStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int zhemv_bs = zhemv_upper_bs; const int thread_x = zhemv_bs; const int thread_y = zhemv_upper_ty; const int elements_per_thread = (zhemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % zhemv_bs; int total_blocks_skipped = offset / zhemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * zhemv_bs * lda; dA += total_blocks_skipped * zhemv_bs; dX += total_blocks_skipped * zhemv_bs * incx; dY += total_blocks_skipped * zhemv_bs * incy; m -= 
total_blocks_skipped * zhemv_bs; /** end offset necessary calculation **/ int mod = m % zhemv_bs; int nstripes = m / zhemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, zhemv_upper_by); if(blocks == 0) return 0; if(mod == 0) { syhemvu_special_d_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); syhemvu_special_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { syhemvu_generic_d_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 1: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 2: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 3: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 4: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 5: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 6: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 7: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; case 8: syhemvu_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); break; // 
return error otherwise: default: printf("ZHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int zhemv_bs = zhemv_lower_bs; const int thread_x = zhemv_bs; const int thread_y = zhemv_lower_ty; const int elements_per_thread = (zhemv_bs/(2*thread_y)) ; /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % zhemv_bs; int total_blocks_skipped = offset / zhemv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * zhemv_bs * lda; dA += total_blocks_skipped * zhemv_bs; dX += total_blocks_skipped * zhemv_bs * incx; dY += total_blocks_skipped * zhemv_bs * incy; m -= total_blocks_skipped * zhemv_bs; /** end offset necessary calculation **/ int mod = m % zhemv_bs; int nstripes = m / zhemv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, zhemv_lower_by); if(blocks == 0) return 0; if(mod == 0) { syhemvl_special_d_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); syhemvl_special_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_); } else { syhemvl_generic_d_offset<cuDoubleComplex, zhemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); syhemvl_generic_nd_offset<cuDoubleComplex, zhemv_bs, thread_x, 
thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_zhemv_offset( char uplo, int m, cuDoubleComplex alpha, cuDoubleComplex *dA, int lda, cuDoubleComplex *dX, int incx, cuDoubleComplex beta, cuDoubleComplex *dY, int incy, int offset) { return kblas_zhemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset); } /*************************************************************************************/ extern "C" int kblas_zhemv_offset_async( char uplo, int m, cuDoubleComplex alpha, cuDoubleComplex *dA, int lda, cuDoubleComplex *dX, int incx, cuDoubleComplex beta, cuDoubleComplex *dY, int incy, int offset, cudaStream_t stream) { return kblas_zhemv_offset_driver(uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, offset, stream); } /*************************************************************************************/
8fa663acb3a3b57857fadd71583c2d87ada4a7c6.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/silence_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Do nothing. } template<typename Dtype> void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_gpu_diff()); #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->template program<Dtype>(); viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel( CL_KERNEL_SELECT("gpu_set")); viennacl::ocl::enqueue( oclk_gpu_set( bottom[i]->count(), fixup_arg_type(Dtype(0)), WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)), ctx.get_queue()); ctx.get_queue().finish(); #endif } } } } INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); } // namespace caffe
8fa663acb3a3b57857fadd71583c2d87ada4a7c6.cu
#include <vector> #include "caffe/layers/silence_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Do nothing. } template<typename Dtype> void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int_tp i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom[i]->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->template program<Dtype>(); viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel( CL_KERNEL_SELECT("gpu_set")); viennacl::ocl::enqueue( oclk_gpu_set( bottom[i]->count(), fixup_arg_type(Dtype(0)), WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)), ctx.get_queue()); ctx.get_queue().finish(); #endif } } } } INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer); } // namespace caffe
a7e0e351e20a075d05ad388cfa4b33e86329c322.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define WARP_SIZE 32 #define HALF_WARP_SIZE (WARP_SIZE >> 1) __global__ void refCounter_kernel(unsigned int * d_counters0, unsigned int * d_counters1, unsigned int * d_del0, unsigned int * d_del1, const unsigned int numRepeats, const unsigned int numSharersPerGroup, const unsigned int numCounters, const unsigned int numSharingGroups, const unsigned int numCounters_perSharingGroup) { // local variables const unsigned int myBaseLoc = ((blockIdx.x * blockDim.x) + threadIdx.x); const unsigned int mySharingGroup = (blockIdx.x % numSharingGroups); const unsigned int myCounterLoc = ((mySharingGroup * numCounters_perSharingGroup) + threadIdx.x); unsigned int * counterAddr0, * counterAddr1; __shared__ volatile int dummyLocal[256]; // for doing local dummy calculations, assumes blockDim.x <= 256 dummyLocal[threadIdx.x] = 0; __syncthreads(); // the counters each thread accesses is fixed, regardless of the number of loop iterations counterAddr0 = &(d_counters0[myCounterLoc]); counterAddr1 = &(d_counters1[myCounterLoc]); // repeat this process a few times for (int i = 0; i < numRepeats; ++i) { // these atomics can be reordered with each other atomicAdd(counterAddr0, 1); atomicAdd(counterAddr1, 1); // Do accesses in scratchpad here to space inc and dec out for (int j = 0; j < numRepeats * 2; ++j) { dummyLocal[threadIdx.x] += j; __syncthreads(); } // If the shared counter == 0 (old value == 1), then mark the "object" to // be deleted // use atomicDec's with threadfences to ensure that we have acquire-release // semantics for DRF1 and DRF0 unsigned int currCount0 = atomicDec(counterAddr0, 1000000000); __threadfence(); unsigned int currCount1 = atomicDec(counterAddr1, 1000000000); __threadfence(); if (currCount0 <= 1) { d_del0[myBaseLoc] = true; } if (currCount1 <= 1) { d_del1[myBaseLoc] = true; } } }
a7e0e351e20a075d05ad388cfa4b33e86329c322.cu
#define WARP_SIZE 32 #define HALF_WARP_SIZE (WARP_SIZE >> 1) __global__ void refCounter_kernel(unsigned int * d_counters0, unsigned int * d_counters1, unsigned int * d_del0, unsigned int * d_del1, const unsigned int numRepeats, const unsigned int numSharersPerGroup, const unsigned int numCounters, const unsigned int numSharingGroups, const unsigned int numCounters_perSharingGroup) { // local variables const unsigned int myBaseLoc = ((blockIdx.x * blockDim.x) + threadIdx.x); const unsigned int mySharingGroup = (blockIdx.x % numSharingGroups); const unsigned int myCounterLoc = ((mySharingGroup * numCounters_perSharingGroup) + threadIdx.x); unsigned int * counterAddr0, * counterAddr1; __shared__ volatile int dummyLocal[256]; // for doing local dummy calculations, assumes blockDim.x <= 256 dummyLocal[threadIdx.x] = 0; __syncthreads(); // the counters each thread accesses is fixed, regardless of the number of loop iterations counterAddr0 = &(d_counters0[myCounterLoc]); counterAddr1 = &(d_counters1[myCounterLoc]); // repeat this process a few times for (int i = 0; i < numRepeats; ++i) { // these atomics can be reordered with each other atomicAdd(counterAddr0, 1); atomicAdd(counterAddr1, 1); // Do accesses in scratchpad here to space inc and dec out for (int j = 0; j < numRepeats * 2; ++j) { dummyLocal[threadIdx.x] += j; __syncthreads(); } // If the shared counter == 0 (old value == 1), then mark the "object" to // be deleted // use atomicDec's with threadfences to ensure that we have acquire-release // semantics for DRF1 and DRF0 unsigned int currCount0 = atomicDec(counterAddr0, 1000000000); __threadfence(); unsigned int currCount1 = atomicDec(counterAddr1, 1000000000); __threadfence(); if (currCount0 <= 1) { d_del0[myBaseLoc] = true; } if (currCount1 <= 1) { d_del1[myBaseLoc] = true; } } }
5fd80069b7fabc9b620d39b6da2ad141209021d8.hip
// !!! This is a file automatically generated by hipify!!! /* * */ #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <algorithm> #include <vector> using std::cout; using std::generate; using std::vector; #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CHECK(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define LEARNING_RATE 0.25 #define NUMB_OF_EPOCHS 100000 #define TD_X 4 // training data in x- dimension #define TD_Y 2 // training data in y- dimension #define TD_Z 2 // training data in z- dimension double TRAINING_DATA[TD_X][TD_Y][TD_Z] = {{{0,0},{0}}, {{0,1},{1}}, {{1,0},{1}}, {{1,1},{1}}}; void trainOnCPU(struct neuron *neurons); void printNetworkInfo(); #include "Neuron.cu" int main(void){ // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); printNetworkInfo(); // declare and initialize neurons struct neuron neurons[5]; setNeurons(neurons); // train network from CPU. float CPUtime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); trainOnCPU(neurons); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&CPUtime, start, stop); printf("Compute time on CPU: %3.6f ms\n", CPUtime); return(1); }
5fd80069b7fabc9b620d39b6da2ad141209021d8.cu
/* * */ #include <stdio.h> #include <time.h> #include <cuda_runtime.h> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <algorithm> #include <vector> using std::cout; using std::generate; using std::vector; #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CHECK(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define LEARNING_RATE 0.25 #define NUMB_OF_EPOCHS 100000 #define TD_X 4 // training data in x- dimension #define TD_Y 2 // training data in y- dimension #define TD_Z 2 // training data in z- dimension double TRAINING_DATA[TD_X][TD_Y][TD_Z] = {{{0,0},{0}}, {{0,1},{1}}, {{1,0},{1}}, {{1,1},{1}}}; void trainOnCPU(struct neuron *neurons); void printNetworkInfo(); #include "Neuron.cu" int main(void){ // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); printNetworkInfo(); // declare and initialize neurons struct neuron neurons[5]; setNeurons(neurons); // train network from CPU. float CPUtime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); trainOnCPU(neurons); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&CPUtime, start, stop); printf("Compute time on CPU: %3.6f ms\n", CPUtime); return(1); }
921b286ed25fe2f4581cbbb29e0c2bcb6a21bc0f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "device_BFS.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *edges = NULL; hipMalloc(&edges, XSIZE*YSIZE); const int *dests = NULL; hipMalloc(&dests, XSIZE*YSIZE); int *labels = NULL; hipMalloc(&labels, XSIZE*YSIZE); int *visited = NULL; hipMalloc(&visited, XSIZE*YSIZE); int *c_frontier_tail = NULL; hipMalloc(&c_frontier_tail, XSIZE*YSIZE); int *c_frontier = NULL; hipMalloc(&c_frontier, XSIZE*YSIZE); int *p_frontier_tail = NULL; hipMalloc(&p_frontier_tail, XSIZE*YSIZE); int *p_frontier = NULL; hipMalloc(&p_frontier, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( device_BFS), dim3(gridBlock),dim3(threadBlock), 0, 0, edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( device_BFS), 
dim3(gridBlock),dim3(threadBlock), 0, 0, edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( device_BFS), dim3(gridBlock),dim3(threadBlock), 0, 0, edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
921b286ed25fe2f4581cbbb29e0c2bcb6a21bc0f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "device_BFS.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *edges = NULL; cudaMalloc(&edges, XSIZE*YSIZE); const int *dests = NULL; cudaMalloc(&dests, XSIZE*YSIZE); int *labels = NULL; cudaMalloc(&labels, XSIZE*YSIZE); int *visited = NULL; cudaMalloc(&visited, XSIZE*YSIZE); int *c_frontier_tail = NULL; cudaMalloc(&c_frontier_tail, XSIZE*YSIZE); int *c_frontier = NULL; cudaMalloc(&c_frontier, XSIZE*YSIZE); int *p_frontier_tail = NULL; cudaMalloc(&p_frontier_tail, XSIZE*YSIZE); int *p_frontier = NULL; cudaMalloc(&p_frontier, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); device_BFS<<<gridBlock,threadBlock>>>(edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { device_BFS<<<gridBlock,threadBlock>>>(edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); } auto start = steady_clock::now(); for (int 
loop_counter = 0; loop_counter < 1000; loop_counter++) { device_BFS<<<gridBlock,threadBlock>>>(edges,dests,labels,visited,c_frontier_tail,c_frontier,p_frontier_tail,p_frontier); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60cedf1a8f92ed66e069119b86c7e86e04456612.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_stencil.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *new_data = NULL; hipMalloc(&new_data, XSIZE*YSIZE); float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); float *param_a = NULL; hipMalloc(&param_a, XSIZE*YSIZE); float *param_b = NULL; hipMalloc(&param_b, XSIZE*YSIZE); float *param_c = NULL; hipMalloc(&param_c, XSIZE*YSIZE); float *param_wrk = NULL; hipMalloc(&param_wrk, XSIZE*YSIZE); float *param_bnd = NULL; hipMalloc(&param_bnd, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_stencil), dim3(gridBlock),dim3(threadBlock), 0, 0, new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_stencil), dim3(gridBlock),dim3(threadBlock), 0, 0, new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); } auto start = 
steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_stencil), dim3(gridBlock),dim3(threadBlock), 0, 0, new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60cedf1a8f92ed66e069119b86c7e86e04456612.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_stencil.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *new_data = NULL; cudaMalloc(&new_data, XSIZE*YSIZE); float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); float *param_a = NULL; cudaMalloc(&param_a, XSIZE*YSIZE); float *param_b = NULL; cudaMalloc(&param_b, XSIZE*YSIZE); float *param_c = NULL; cudaMalloc(&param_c, XSIZE*YSIZE); float *param_wrk = NULL; cudaMalloc(&param_wrk, XSIZE*YSIZE); float *param_bnd = NULL; cudaMalloc(&param_bnd, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_stencil<<<gridBlock,threadBlock>>>(new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_stencil<<<gridBlock,threadBlock>>>(new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
kernel_stencil<<<gridBlock,threadBlock>>>(new_data,data,param_a,param_b,param_c,param_wrk,param_bnd); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f997b7a189b31ef91aeb8a5db14780f9dff5134a.hip
// !!! This is a file automatically generated by hipify!!! /* * File: global.h * Author: Da Li * Email: da.li@mail.missouri.edu * Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/) * * Description: This file is adapted from GPU implementation of Rodinia benchmark. * */ #define LIMIT -999 #define BLOCK_SIZE 16 #define NUM_SEQ 64 //#define TRACE #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <needle.h> #include <hip/hip_runtime.h> #include <sys/time.h> // includes, kernels #include <needle_kernel_rodinia.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62_rodinia[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, 
-2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<pair number> - number of pairs\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { double time; double start_time; int max_rows, max_cols, penalty, pair_num; int *input_itemsets[NUM_SEQ], *output_itemsets[NUM_SEQ], *referrence[NUM_SEQ]; int *matrix_cuda[NUM_SEQ], *referrence_cuda[NUM_SEQ]; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 4) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); pair_num = atoi(argv[2]); penalty = atoi(argv[3]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } time = gettime(); hipSetDevice(0); max_rows = max_rows + 1; max_cols = max_cols + 1; start_time = gettime(); fprintf(stdout,"First API,%lf\n",start_time-time); //time = start_time; for (int i=0; i<pair_num; ++i){ referrence[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); } if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); time = gettime(); for (int n = 0; n<pair_num; ++n){ for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[n][i*max_cols+j] = 0; } } } //printf("Start Needleman-Wunsch\n"); for (int n = 0; n<pair_num; ++n){ for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[n][i*max_cols] = rand() % 20 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. 
input_itemsets[n][j] = rand() % 20 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[n][i*max_cols+j] = blosum62[input_itemsets[n][i*max_cols]][input_itemsets[n][j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[n][i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[n][j] = -j * penalty; } size = max_cols * max_rows; dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //printf("The size of each matrix: %d\n", sizeof(int)*size); //start_time = gettime(); //fprintf(stdout,"CPU,%lf\n",start_time-time); for (int n=0; n<pair_num; ++n){ hipMalloc((void**)& referrence_cuda[n], sizeof(int)*size); hipMalloc((void**)& matrix_cuda[n], sizeof(int)*size); } //fprintf(stdout,"Size of int: %d\n", sizeof(int)); //fprintf(stdout,"Total memory usage: %dMB\n", sizeof(int)*size*NUM_SEQ*2/1024/1024); start_time = gettime(); //fprintf(stdout,"hipMalloc,%lf\n",start_time-time); time = start_time; for (int n=0; n<pair_num; ++n){ hipMemcpy(referrence_cuda[n], referrence[n], sizeof(int) * size, hipMemcpyHostToDevice); hipMemcpy(matrix_cuda[n], input_itemsets[n], sizeof(int) * size, hipMemcpyHostToDevice); } //hipDeviceSynchronize(); //start_time = gettime(); //fprintf(stdout,"Memcpy to device,%lf\n",start_time-time); //time = start_time; // Sequencially compare multiple sequences for (int n=0; n<pair_num; ++n){ //printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda[n], matrix_cuda[n] ,max_cols, penalty, i, block_width); } //printf("Processing bottom-right matrix\n"); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda[n], matrix_cuda[n] ,max_cols, 
penalty, i, block_width); } } //hipDeviceSynchronize(); //start_time = gettime(); //fprintf(stdout,"kernel,%lf\n",start_time-time); //time = start_time; for (int n=0; n<pair_num; ++n){ hipMemcpy(output_itemsets[n], matrix_cuda[n], sizeof(int) * size, hipMemcpyDeviceToHost); } start_time = gettime(); //fprintf(stdout,"Memcpy to host,%lf\n",start_time-time); fprintf(stdout,"CUDA time,%lf\n",start_time-time); #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) //fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; //fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;}; } // fclose(fpo); #endif hipFree(referrence_cuda); hipFree(matrix_cuda); for (int n=0; n<pair_num; ++n){ free(referrence[n]); free(input_itemsets[n]); free(output_itemsets[n]); } }
f997b7a189b31ef91aeb8a5db14780f9dff5134a.cu
/* * File: global.h * Author: Da Li * Email: da.li@mail.missouri.edu * Organization: Networking and Parallel Systems Lab (http://nps.missouri.edu/) * * Description: This file is adapted from GPU implementation of Rodinia benchmark. * */ #define LIMIT -999 #define BLOCK_SIZE 16 #define NUM_SEQ 64 //#define TRACE #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <needle.h> #include <cuda.h> #include <sys/time.h> // includes, kernels #include <needle_kernel_rodinia.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62_rodinia[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, 
-3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<pair number> - number of pairs\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } void runTest( int argc, char** argv) { double time; double start_time; int max_rows, max_cols, penalty, pair_num; int *input_itemsets[NUM_SEQ], *output_itemsets[NUM_SEQ], *referrence[NUM_SEQ]; int *matrix_cuda[NUM_SEQ], *referrence_cuda[NUM_SEQ]; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 4) { max_rows = atoi(argv[1]); max_cols = atoi(argv[1]); pair_num = atoi(argv[2]); penalty = atoi(argv[3]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } time = gettime(); cudaSetDevice(0); max_rows = max_rows + 1; max_cols = max_cols + 1; start_time = gettime(); fprintf(stdout,"First API,%lf\n",start_time-time); //time = start_time; for (int i=0; i<pair_num; ++i){ referrence[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets[i] = (int *)malloc( max_rows * max_cols * sizeof(int) ); } if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); time = gettime(); for (int n = 0; n<pair_num; ++n){ for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[n][i*max_cols+j] = 0; } } } //printf("Start Needleman-Wunsch\n"); for (int n = 0; n<pair_num; ++n){ for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[n][i*max_cols] = rand() % 20 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. 
input_itemsets[n][j] = rand() % 20 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[n][i*max_cols+j] = blosum62[input_itemsets[n][i*max_cols]][input_itemsets[n][j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[n][i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[n][j] = -j * penalty; } size = max_cols * max_rows; dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //printf("The size of each matrix: %d\n", sizeof(int)*size); //start_time = gettime(); //fprintf(stdout,"CPU,%lf\n",start_time-time); for (int n=0; n<pair_num; ++n){ cudaMalloc((void**)& referrence_cuda[n], sizeof(int)*size); cudaMalloc((void**)& matrix_cuda[n], sizeof(int)*size); } //fprintf(stdout,"Size of int: %d\n", sizeof(int)); //fprintf(stdout,"Total memory usage: %dMB\n", sizeof(int)*size*NUM_SEQ*2/1024/1024); start_time = gettime(); //fprintf(stdout,"cudaMalloc,%lf\n",start_time-time); time = start_time; for (int n=0; n<pair_num; ++n){ cudaMemcpy(referrence_cuda[n], referrence[n], sizeof(int) * size, cudaMemcpyHostToDevice); cudaMemcpy(matrix_cuda[n], input_itemsets[n], sizeof(int) * size, cudaMemcpyHostToDevice); } //cudaDeviceSynchronize(); //start_time = gettime(); //fprintf(stdout,"Memcpy to device,%lf\n",start_time-time); //time = start_time; // Sequencially compare multiple sequences for (int n=0; n<pair_num; ++n){ //printf("Processing top-left matrix\n"); //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda[n], matrix_cuda[n] ,max_cols, penalty, i, block_width); } //printf("Processing bottom-right matrix\n"); //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda[n], matrix_cuda[n] ,max_cols, penalty, i, block_width); } } //cudaDeviceSynchronize(); 
//start_time = gettime(); //fprintf(stdout,"kernel,%lf\n",start_time-time); //time = start_time; for (int n=0; n<pair_num; ++n){ cudaMemcpy(output_itemsets[n], matrix_cuda[n], sizeof(int) * size, cudaMemcpyDeviceToHost); } start_time = gettime(); //fprintf(stdout,"Memcpy to host,%lf\n",start_time-time); fprintf(stdout,"CUDA time,%lf\n",start_time-time); #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) //fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; //fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;}; } // fclose(fpo); #endif cudaFree(referrence_cuda); cudaFree(matrix_cuda); for (int n=0; n<pair_num; ++n){ free(referrence[n]); free(input_itemsets[n]); free(output_itemsets[n]); } }
93003c32593738a49965d7d2ad141297f958d89d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Plate.h" #include "../Utils/cuda_math.h" #include "../Utils/cuda_double_math.h" #include "../Materials.h" /* Plate */ __device__ Plate::Plate() {} __device__ void Plate::init(float3 p, float3 u, float3 r, float2 l_size, Material* material_ptr) { center = p; up = normalize(u); right = normalize(r); size = l_size; SetMaterial(material_ptr); } __device__ bool Plate::Intersection(const Ray& ray, HitRec& hr) { __shared__ double3 p; if (threadIdx.x == 0) p = make_double3(center); __shared__ double3 n; if (threadIdx.x == 0) n = make_double3(cross(right, up)); __syncthreads(); double3 ro = make_double3(ray.origin); double3 dir = make_double3(ray.direction); double t = dot(p - ro, n) / dot(dir, n); if (t > eps) { double3 vec = ro + t * dir - (make_double3(center) - size.y * 0.5 * make_double3(up) - size.x * 0.5 * make_double3(right)); double pr_up = dot(make_double3(up), vec); double pr_right = dot(make_double3(right), vec); if(pr_up >= 0.0f && pr_right >= 0.0f && pr_up <= size.y && pr_right <= size.x) { if(t < hr.tmin || !hr.isHit) { hr.isHit = true; hr.tmin = t; hr.hit_point = ray.origin + t * ray.direction; hr.hit_normal = normalize(cross(right, up)); hr.ray = ray; hr.material_ptr = material_ptr; return true; } } } return false; } __device__ Plate::~Plate() {}
93003c32593738a49965d7d2ad141297f958d89d.cu
#include "Plate.h" #include "../Utils/cuda_math.h" #include "../Utils/cuda_double_math.h" #include "../Materials.h" /* Plate */ __device__ Plate::Plate() {} __device__ void Plate::init(float3 p, float3 u, float3 r, float2 l_size, Material* material_ptr) { center = p; up = normalize(u); right = normalize(r); size = l_size; SetMaterial(material_ptr); } __device__ bool Plate::Intersection(const Ray& ray, HitRec& hr) { __shared__ double3 p; if (threadIdx.x == 0) p = make_double3(center); __shared__ double3 n; if (threadIdx.x == 0) n = make_double3(cross(right, up)); __syncthreads(); double3 ro = make_double3(ray.origin); double3 dir = make_double3(ray.direction); double t = dot(p - ro, n) / dot(dir, n); if (t > eps) { double3 vec = ro + t * dir - (make_double3(center) - size.y * 0.5 * make_double3(up) - size.x * 0.5 * make_double3(right)); double pr_up = dot(make_double3(up), vec); double pr_right = dot(make_double3(right), vec); if(pr_up >= 0.0f && pr_right >= 0.0f && pr_up <= size.y && pr_right <= size.x) { if(t < hr.tmin || !hr.isHit) { hr.isHit = true; hr.tmin = t; hr.hit_point = ray.origin + t * ray.direction; hr.hit_normal = normalize(cross(right, up)); hr.ray = ray; hr.material_ptr = material_ptr; return true; } } } return false; } __device__ Plate::~Plate() {}
21e26a716a46f59313092d688f9de0668daaa363.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hipcub/hipcub.hpp> #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/user/kernels/loss_kernel_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace user_op { namespace { using namespace loss; #define RETURN_VOID_IF_NOT_HALF typename std::enable_if_t<!std::is_same<T, half>::value, void> #define RETURN_VOID_IF_HALF typename std::enable_if_t<std::is_same<T, half>::value, void> template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllOutNone(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight) { const T zero_val = GetZeroVal<T>(); const T one_val = GetOneVal<T>(); CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { out[i] = zero_val; continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? 
one_val : weight[label]; cuda::atomic::Add(total_weight, cur_weight); out[i] = -input[i * num_classes + label] * cur_weight; } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllOutNone(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) const T zero_val = __float2half(0.0); const T one_val = __float2half(1.0); CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { out[i] = zero_val; continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? one_val : weight[label]; cuda::atomic::Add(total_weight, cur_weight); out[i] = __float2half(-__half2float(input[i * num_classes + label] * cur_weight)); } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllOutReduce(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight, bool is_reduce_mean) { const T zero_val = GetZeroVal<T>(); const T one_val = GetOneVal<T>(); typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage; T weight_thread_sum = zero_val; T out_thread_sum = zero_val; for (int i = threadIdx.x; i < num_instances; i += kCudaThreadsNumPerBlock) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? 
one_val : weight[label]; weight_thread_sum += cur_weight; out_thread_sum -= input[i * num_classes + label] * cur_weight; } __syncthreads(); T weight_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(weight_thread_sum, hipcub::Sum()); T out_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(out_thread_sum, hipcub::Sum()); if (threadIdx.x == 0) { *out = out_block_sum; *total_weight = weight_block_sum; if (is_reduce_mean) { *out /= *total_weight; } } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllOutReduce(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight, bool is_reduce_mean) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) const T zero_val = __float2half(0.0); const T one_val = __float2half(1.0); typedef hipcub::BlockReduce<half, kCudaThreadsNumPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage; half weight_thread_sum = zero_val; half out_thread_sum = zero_val; for (int i = threadIdx.x; i < num_instances; i += kCudaThreadsNumPerBlock) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? 
one_val : weight[label]; weight_thread_sum = __hadd(weight_thread_sum, cur_weight); out_thread_sum = __hsub(out_thread_sum, __hmul(input[i * num_classes + label], cur_weight)); } __syncthreads(); half weight_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(weight_thread_sum, hipcub::Sum()); half out_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(out_thread_sum, hipcub::Sum()); if (threadIdx.x == 0) { *out = out_block_sum; *total_weight = weight_block_sum; if (is_reduce_mean) { *out = __hdiv(*out, *total_weight); } } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllGradOut(const int64_t num_instances, const K num_classes, const K ignore_index, const K* target, const T* dy, T* dx, const T* weight, const T* total_weight, const ReductionType reduction_type) { CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? -GetOneVal<T>() : -weight[label]; dx[i * num_classes + label] = (reduction_type == ReductionType::kNone ? dy[i] : (*dy)) * cur_weight; if (reduction_type == ReductionType::kMean) { dx[i * num_classes + label] /= *total_weight; } } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllGradOut(const int64_t num_instances, const K num_classes, const K ignore_index, const K* target, const T* dy, T* dx, const T* weight, const T* total_weight, const ReductionType reduction_type) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? __float2half(-1.0) : __hneg(weight[label]); dx[i * num_classes + label] = __hmul(reduction_type == ReductionType::kNone ? 
dy[i] : (*dy), cur_weight); if (reduction_type == ReductionType::kMean) { dx[i * num_classes + label] = __hdiv(dx[i * num_classes + label], *total_weight); } } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> class NllKernel final : public user_op::OpKernel { public: NllKernel() = default; ~NllKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0); const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0); auto* out_blob = ctx->Tensor4ArgNameAndIndex("out", 0); auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0); const int64_t num_instances = target_blob->shape().elem_cnt(); CHECK_EQ(input_blob->shape().elem_cnt() % num_instances, 0); const K num_classes = static_cast<K>(input_blob->shape().elem_cnt() / num_instances); const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index")); const ReductionType reduction = GetReductionType(ctx->Attr<std::string>("reduction")); const T* input = input_blob->dptr<T>(); const K* target = target_blob->dptr<K>(); T* out = out_blob->mut_dptr<T>(); T* total_weight = total_weight_blob->mut_dptr<T>(); const T* weight = ctx->has_input("weight", 0) ? 
ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr; Memset<DeviceType::kGPU>(ctx->stream(), total_weight, 0, sizeof(T)); if (reduction == ReductionType::kNone) { hipLaunchKernelGGL(( ComputeNllOutNone), dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), num_instances, num_classes, ignore_index, input, target, out, weight, total_weight); } else { hipLaunchKernelGGL(( ComputeNllOutReduce), dim3(1), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), num_instances, num_classes, ignore_index, input, target, out, weight, total_weight, reduction == ReductionType::kMean); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T, typename K> class NllGradKernel final : public user_op::OpKernel { public: NllGradKernel() = default; ~NllGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0); const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0); const auto* dy_blob = ctx->Tensor4ArgNameAndIndex("dy", 0); auto* dx_blob = ctx->Tensor4ArgNameAndIndex("dx", 0); auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0); const int64_t num_instances = target_blob->shape().elem_cnt(); const int64_t input_elem_cnt = input_blob->shape().elem_cnt(); CHECK_EQ(input_elem_cnt % num_instances, 0); const K num_classes = static_cast<K>(input_elem_cnt / num_instances); const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index")); const ReductionType reduction = GetReductionType(ctx->Attr<std::string>("reduction")); const T* dy = dy_blob->dptr<T>(); const K* target = target_blob->dptr<K>(); const T* total_weight = total_weight_blob->dptr<T>(); T* dx = dx_blob->mut_dptr<T>(); const T* weight = ctx->has_input("weight", 0) ? 
ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr; Memset<DeviceType::kGPU>(ctx->stream(), dx, 0, input_elem_cnt * sizeof(T)); hipLaunchKernelGGL(( ComputeNllGradOut), dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), num_instances, num_classes, ignore_index, target, dy, dx, weight, total_weight, reduction); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; } // namespace #define REGISTER_NLL_KERNEL(dtype_pair, ltype_pair) \ REGISTER_USER_KERNEL("nll") \ .SetCreateFn<NllKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \ && (user_op::HobDataType("out", 0) == OF_PP_PAIR_SECOND(dtype_pair))); #define REGISTER_NLL_GRAD_KERNEL(dtype_pair, ltype_pair) \ REGISTER_USER_KERNEL("nll_grad") \ .SetCreateFn<NllGradKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \ && (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dtype_pair)) \ && (user_op::HobDataType("dx", 0) == OF_PP_PAIR_SECOND(dtype_pair))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_KERNEL, FLOATING_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_GRAD_KERNEL, FLOATING_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace user_op } // namespace oneflow
21e26a716a46f59313092d688f9de0668daaa363.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cub/cub.cuh> #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/user/kernels/loss_kernel_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace user_op { namespace { using namespace loss; #define RETURN_VOID_IF_NOT_HALF typename std::enable_if_t<!std::is_same<T, half>::value, void> #define RETURN_VOID_IF_HALF typename std::enable_if_t<std::is_same<T, half>::value, void> template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllOutNone(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight) { const T zero_val = GetZeroVal<T>(); const T one_val = GetOneVal<T>(); CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { out[i] = zero_val; continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? 
one_val : weight[label]; cuda::atomic::Add(total_weight, cur_weight); out[i] = -input[i * num_classes + label] * cur_weight; } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllOutNone(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) const T zero_val = __float2half(0.0); const T one_val = __float2half(1.0); CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { out[i] = zero_val; continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? one_val : weight[label]; cuda::atomic::Add(total_weight, cur_weight); out[i] = __float2half(-__half2float(input[i * num_classes + label] * cur_weight)); } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllOutReduce(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight, bool is_reduce_mean) { const T zero_val = GetZeroVal<T>(); const T one_val = GetOneVal<T>(); typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage; T weight_thread_sum = zero_val; T out_thread_sum = zero_val; for (int i = threadIdx.x; i < num_instances; i += kCudaThreadsNumPerBlock) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? 
one_val : weight[label]; weight_thread_sum += cur_weight; out_thread_sum -= input[i * num_classes + label] * cur_weight; } __syncthreads(); T weight_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(weight_thread_sum, cub::Sum()); T out_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(out_thread_sum, cub::Sum()); if (threadIdx.x == 0) { *out = out_block_sum; *total_weight = weight_block_sum; if (is_reduce_mean) { *out /= *total_weight; } } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllOutReduce(const int64_t num_instances, const K num_classes, const K ignore_index, const T* input, const K* target, T* out, const T* weight, T* total_weight, bool is_reduce_mean) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) const T zero_val = __float2half(0.0); const T one_val = __float2half(1.0); typedef cub::BlockReduce<half, kCudaThreadsNumPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage; half weight_thread_sum = zero_val; half out_thread_sum = zero_val; for (int i = threadIdx.x; i < num_instances; i += kCudaThreadsNumPerBlock) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? 
one_val : weight[label]; weight_thread_sum = __hadd(weight_thread_sum, cur_weight); out_thread_sum = __hsub(out_thread_sum, __hmul(input[i * num_classes + label], cur_weight)); } __syncthreads(); half weight_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(weight_thread_sum, cub::Sum()); half out_block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(out_thread_sum, cub::Sum()); if (threadIdx.x == 0) { *out = out_block_sum; *total_weight = weight_block_sum; if (is_reduce_mean) { *out = __hdiv(*out, *total_weight); } } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> __global__ RETURN_VOID_IF_NOT_HALF ComputeNllGradOut(const int64_t num_instances, const K num_classes, const K ignore_index, const K* target, const T* dy, T* dx, const T* weight, const T* total_weight, const ReductionType reduction_type) { CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const T cur_weight = weight == nullptr ? -GetOneVal<T>() : -weight[label]; dx[i * num_classes + label] = (reduction_type == ReductionType::kNone ? dy[i] : (*dy)) * cur_weight; if (reduction_type == ReductionType::kMean) { dx[i * num_classes + label] /= *total_weight; } } } template<typename T, typename K> __global__ RETURN_VOID_IF_HALF ComputeNllGradOut(const int64_t num_instances, const K num_classes, const K ignore_index, const K* target, const T* dy, T* dx, const T* weight, const T* total_weight, const ReductionType reduction_type) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, num_instances) { K label = target[i]; if (label == ignore_index) { continue; } assert(label >= 0); assert(label < num_classes); const half cur_weight = weight == nullptr ? __float2half(-1.0) : __hneg(weight[label]); dx[i * num_classes + label] = __hmul(reduction_type == ReductionType::kNone ? 
dy[i] : (*dy), cur_weight); if (reduction_type == ReductionType::kMean) { dx[i * num_classes + label] = __hdiv(dx[i * num_classes + label], *total_weight); } } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } template<typename T, typename K> class NllKernel final : public user_op::OpKernel { public: NllKernel() = default; ~NllKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0); const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0); auto* out_blob = ctx->Tensor4ArgNameAndIndex("out", 0); auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0); const int64_t num_instances = target_blob->shape().elem_cnt(); CHECK_EQ(input_blob->shape().elem_cnt() % num_instances, 0); const K num_classes = static_cast<K>(input_blob->shape().elem_cnt() / num_instances); const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index")); const ReductionType reduction = GetReductionType(ctx->Attr<std::string>("reduction")); const T* input = input_blob->dptr<T>(); const K* target = target_blob->dptr<K>(); T* out = out_blob->mut_dptr<T>(); T* total_weight = total_weight_blob->mut_dptr<T>(); const T* weight = ctx->has_input("weight", 0) ? 
ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr; Memset<DeviceType::kGPU>(ctx->stream(), total_weight, 0, sizeof(T)); if (reduction == ReductionType::kNone) { ComputeNllOutNone<<<BlocksNum4ThreadsNum(num_instances), kCudaThreadsNumPerBlock, 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( num_instances, num_classes, ignore_index, input, target, out, weight, total_weight); } else { ComputeNllOutReduce<<<1, kCudaThreadsNumPerBlock, 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( num_instances, num_classes, ignore_index, input, target, out, weight, total_weight, reduction == ReductionType::kMean); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T, typename K> class NllGradKernel final : public user_op::OpKernel { public: NllGradKernel() = default; ~NllGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0); const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0); const auto* dy_blob = ctx->Tensor4ArgNameAndIndex("dy", 0); auto* dx_blob = ctx->Tensor4ArgNameAndIndex("dx", 0); auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0); const int64_t num_instances = target_blob->shape().elem_cnt(); const int64_t input_elem_cnt = input_blob->shape().elem_cnt(); CHECK_EQ(input_elem_cnt % num_instances, 0); const K num_classes = static_cast<K>(input_elem_cnt / num_instances); const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index")); const ReductionType reduction = GetReductionType(ctx->Attr<std::string>("reduction")); const T* dy = dy_blob->dptr<T>(); const K* target = target_blob->dptr<K>(); const T* total_weight = total_weight_blob->dptr<T>(); T* dx = dx_blob->mut_dptr<T>(); const T* weight = ctx->has_input("weight", 0) ? 
ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr; Memset<DeviceType::kGPU>(ctx->stream(), dx, 0, input_elem_cnt * sizeof(T)); ComputeNllGradOut<<<BlocksNum4ThreadsNum(num_instances), kCudaThreadsNumPerBlock, 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( num_instances, num_classes, ignore_index, target, dy, dx, weight, total_weight, reduction); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; } // namespace #define REGISTER_NLL_KERNEL(dtype_pair, ltype_pair) \ REGISTER_USER_KERNEL("nll") \ .SetCreateFn<NllKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \ && (user_op::HobDataType("out", 0) == OF_PP_PAIR_SECOND(dtype_pair))); #define REGISTER_NLL_GRAD_KERNEL(dtype_pair, ltype_pair) \ REGISTER_USER_KERNEL("nll_grad") \ .SetCreateFn<NllGradKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \ && (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dtype_pair)) \ && (user_op::HobDataType("dx", 0) == OF_PP_PAIR_SECOND(dtype_pair))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_KERNEL, FLOATING_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_GRAD_KERNEL, FLOATING_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace user_op } // namespace oneflow
c87f9d44da17c0f1195a678adf8a6333774d2118.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // Kernel function to add the elements of two arrays // Compile: // $ cuda_example.cu -o cuda_add // $ ./cuda_add __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
c87f9d44da17c0f1195a678adf8a6333774d2118.cu
#include <iostream> #include <math.h> // Kernel function to add the elements of two arrays // Compile: // $ cuda_example.cu -o cuda_add // $ ./cuda_add __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU add<<<1, 1>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
bbaf3d78e5ac4373677885039f22a82052699f5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/superpixel_predict_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void forward_gpu_kernel(const int n, const Dtype* const sp_data, const Dtype* const pred_data, Dtype* out_data, const int num, const int channels, const int height, const int width, const int sp_num){ CUDA_KERNEL_LOOP(index, n){ const int n_idx = index / height / channels; const int c = (index / height) % channels; const int h = index % height; // Iter the width const int offset_out = ((n_idx*channels+c)*height+h)*width; const int offset_pred = (n_idx*channels+c)*sp_num; const int offset_sp = (n_idx*height+h)*width; const Dtype* pred_data_n = pred_data + offset_pred; const Dtype* sp_data_n = sp_data + offset_sp; Dtype* out_data_n = out_data + offset_out; for (int i = 0; i < width; i++){ out_data_n[i] = pred_data_n[(int)(sp_data_n[i])]; } } } template <typename Dtype> void SuperpixelPredictLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* sp_data = bottom[1]->gpu_data(); const Dtype* pred_data = bottom[0]->gpu_data(); Dtype* out_data = top[0]->mutable_gpu_data(); const int num = top[0]->num(); const int channels = top[0]->channels(); const int height = top[0]->height(); const int width = top[0]->width(); // Clear the memory to zero caffe_gpu_set(top[0]->count(), Dtype(0), out_data); // Iter all pixels in the minibatch // The num_kernels is n*c*h const int num_kernels = num * channels * height; hipLaunchKernelGGL(( forward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, sp_data, pred_data, out_data, num, channels, height, width, sp_num_); CUDA_POST_KERNEL_CHECK; } template void SuperpixelPredictLayer<float>::Forward_gpu(const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template 
void SuperpixelPredictLayer<double>::Forward_gpu(const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); //INSTANTIATE_LAYER_GPU_FUNCS(SuperpixelPredictLayer); } // namespace caffe
bbaf3d78e5ac4373677885039f22a82052699f5e.cu
#include <vector> #include "caffe/layers/superpixel_predict_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void forward_gpu_kernel(const int n, const Dtype* const sp_data, const Dtype* const pred_data, Dtype* out_data, const int num, const int channels, const int height, const int width, const int sp_num){ CUDA_KERNEL_LOOP(index, n){ const int n_idx = index / height / channels; const int c = (index / height) % channels; const int h = index % height; // Iter the width const int offset_out = ((n_idx*channels+c)*height+h)*width; const int offset_pred = (n_idx*channels+c)*sp_num; const int offset_sp = (n_idx*height+h)*width; const Dtype* pred_data_n = pred_data + offset_pred; const Dtype* sp_data_n = sp_data + offset_sp; Dtype* out_data_n = out_data + offset_out; for (int i = 0; i < width; i++){ out_data_n[i] = pred_data_n[(int)(sp_data_n[i])]; } } } template <typename Dtype> void SuperpixelPredictLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* sp_data = bottom[1]->gpu_data(); const Dtype* pred_data = bottom[0]->gpu_data(); Dtype* out_data = top[0]->mutable_gpu_data(); const int num = top[0]->num(); const int channels = top[0]->channels(); const int height = top[0]->height(); const int width = top[0]->width(); // Clear the memory to zero caffe_gpu_set(top[0]->count(), Dtype(0), out_data); // Iter all pixels in the minibatch // The num_kernels is n*c*h const int num_kernels = num * channels * height; forward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, sp_data, pred_data, out_data, num, channels, height, width, sp_num_); CUDA_POST_KERNEL_CHECK; } template void SuperpixelPredictLayer<float>::Forward_gpu(const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void SuperpixelPredictLayer<double>::Forward_gpu(const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); 
//INSTANTIATE_LAYER_GPU_FUNCS(SuperpixelPredictLayer); } // namespace caffe
04a214e0a37ec9a862e0a2931dcdd20954b56fe0.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer &timer() { static PerformanceTimer timer; return timer; } template <typename T> __device__ void inline swap(T &a, T &b) { T c(a); a = b; b = c; } /** * Naive parallel scan algorithm * Input must be stored in `data`. * Output is stored both in `data` and `buffer`. */ __global__ void kernScanInclusive(int n, int *data, int *buffer) { int id = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; int bdim = blockDim.x; int log2n = ilog2ceil((n < bdim) ? n : bdim); if (id < n) { for (int d = 1; d <= log2n; ++d) { buffer[id] = data[id]; __syncthreads(); if (tx >= (1 << (d - 1))) { buffer[id] = data[id - (1 << (d - 1))] + data[id]; } __syncthreads(); data[id] = buffer[id]; __syncthreads(); } } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { if (n <= 0) return; const unsigned int block_size = Common::block_size_naive; int num_scans = 1; int len = n; while ((len + block_size - 1) / block_size > 1) { ++num_scans; len = (len + block_size - 1) / block_size; } int **dev_idata = (int **)malloc(num_scans * sizeof(int *)); int **dev_odata = (int **)malloc(num_scans * sizeof(int *)); int **dev_buffer = (int **)malloc(num_scans * sizeof(int *)); int *array_sizes = (int *)malloc(num_scans * sizeof(int)); int *grid_sizes = (int *)malloc(num_scans * sizeof(int)); len = n; for (int i = 0; i < num_scans; ++i) { hipMalloc((void **)&dev_idata[i], len * sizeof(int)); hipMalloc((void **)&dev_odata[i], len * sizeof(int)); hipMalloc((void **)&dev_buffer[i], len * sizeof(int)); checkCUDAError("hipMalloc failed for dev_idata, dev_odata, dev_buffer!"); array_sizes[i] = len; len = (len + block_size - 1) / block_size; grid_sizes[i] = len; } hipMemcpy(dev_idata[0], idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy failed for idata --> dev_idata[0]!"); /******* KERNEL INVOCATIONS *******/ dim3 dimBlock{block_size}; timer().startGpuTimer(); for (int i = 0; i < num_scans; ++i) { dim3 dimGrid{(unsigned int)grid_sizes[i]}; hipLaunchKernelGGL(( kernScanInclusive), dim3(dimGrid), dim3(dimBlock), 0, 0, array_sizes[i], dev_idata[i], dev_buffer[i]); if (i < num_scans - 1) { hipLaunchKernelGGL(( Common::kernExtractLastElementPerBlock), dim3(dimGrid), dim3(dimBlock), 0, 0, array_sizes[i], dev_idata[i + 1], dev_idata[i]); } } for (int i = num_scans - 1; i >= 0; --i) { dim3 dimGrid{(unsigned int)grid_sizes[i]}; hipLaunchKernelGGL(( Common::kernShiftToExclusive), dim3(dimGrid), dim3(dimBlock), 0, 0, array_sizes[i], dev_odata[i], dev_buffer[i]); if (i >= 1) { dim3 next_dimGrid{(unsigned int)grid_sizes[i - 1]}; hipLaunchKernelGGL(( Common::kernAddOffsetPerBlock), dim3(next_dimGrid), dim3(dimBlock), 0, 0, array_sizes[i - 1], dev_buffer[i - 1], dev_odata[i], dev_idata[i - 
1]); } } hipDeviceSynchronize(); timer().endGpuTimer(); /**********************************/ hipMemcpy(odata, dev_odata[0], n * sizeof(int), hipMemcpyDeviceToHost); // Free all memory allocations for (int i = 0; i < num_scans; ++i) { hipFree(dev_idata[i]); hipFree(dev_odata[i]); hipFree(dev_buffer[i]); } free(grid_sizes); free(array_sizes); free(dev_idata); free(dev_odata); free(dev_buffer); } } // namespace Naive } // namespace StreamCompaction
04a214e0a37ec9a862e0a2931dcdd20954b56fe0.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer &timer() { static PerformanceTimer timer; return timer; } template <typename T> __device__ void inline swap(T &a, T &b) { T c(a); a = b; b = c; } /** * Naive parallel scan algorithm * Input must be stored in `data`. * Output is stored both in `data` and `buffer`. */ __global__ void kernScanInclusive(int n, int *data, int *buffer) { int id = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; int bdim = blockDim.x; int log2n = ilog2ceil((n < bdim) ? n : bdim); if (id < n) { for (int d = 1; d <= log2n; ++d) { buffer[id] = data[id]; __syncthreads(); if (tx >= (1 << (d - 1))) { buffer[id] = data[id - (1 << (d - 1))] + data[id]; } __syncthreads(); data[id] = buffer[id]; __syncthreads(); } } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { if (n <= 0) return; const unsigned int block_size = Common::block_size_naive; int num_scans = 1; int len = n; while ((len + block_size - 1) / block_size > 1) { ++num_scans; len = (len + block_size - 1) / block_size; } int **dev_idata = (int **)malloc(num_scans * sizeof(int *)); int **dev_odata = (int **)malloc(num_scans * sizeof(int *)); int **dev_buffer = (int **)malloc(num_scans * sizeof(int *)); int *array_sizes = (int *)malloc(num_scans * sizeof(int)); int *grid_sizes = (int *)malloc(num_scans * sizeof(int)); len = n; for (int i = 0; i < num_scans; ++i) { cudaMalloc((void **)&dev_idata[i], len * sizeof(int)); cudaMalloc((void **)&dev_odata[i], len * sizeof(int)); cudaMalloc((void **)&dev_buffer[i], len * sizeof(int)); checkCUDAError("cudaMalloc failed for dev_idata, dev_odata, dev_buffer!"); array_sizes[i] = len; len = (len + block_size - 1) / block_size; grid_sizes[i] = len; } cudaMemcpy(dev_idata[0], idata, n * sizeof(int), 
cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy failed for idata --> dev_idata[0]!"); /******* KERNEL INVOCATIONS *******/ dim3 dimBlock{block_size}; timer().startGpuTimer(); for (int i = 0; i < num_scans; ++i) { dim3 dimGrid{(unsigned int)grid_sizes[i]}; kernScanInclusive<<<dimGrid, dimBlock>>>(array_sizes[i], dev_idata[i], dev_buffer[i]); if (i < num_scans - 1) { Common::kernExtractLastElementPerBlock<<<dimGrid, dimBlock>>>( array_sizes[i], dev_idata[i + 1], dev_idata[i]); } } for (int i = num_scans - 1; i >= 0; --i) { dim3 dimGrid{(unsigned int)grid_sizes[i]}; Common::kernShiftToExclusive<<<dimGrid, dimBlock>>>( array_sizes[i], dev_odata[i], dev_buffer[i]); if (i >= 1) { dim3 next_dimGrid{(unsigned int)grid_sizes[i - 1]}; Common::kernAddOffsetPerBlock<<<next_dimGrid, dimBlock>>>( array_sizes[i - 1], dev_buffer[i - 1], dev_odata[i], dev_idata[i - 1]); } } cudaDeviceSynchronize(); timer().endGpuTimer(); /**********************************/ cudaMemcpy(odata, dev_odata[0], n * sizeof(int), cudaMemcpyDeviceToHost); // Free all memory allocations for (int i = 0; i < num_scans; ++i) { cudaFree(dev_idata[i]); cudaFree(dev_odata[i]); cudaFree(dev_buffer[i]); } free(grid_sizes); free(array_sizes); free(dev_idata); free(dev_odata); free(dev_buffer); } } // namespace Naive } // namespace StreamCompaction
e5ed226bd55ecbce693b666ad62547f17affb23d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __device__ void radixSort(float *d_InArr, float *d_OutArr, int n, int numBits) { int i; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; //TODO: shared mem allocatiom //shared memory to store histogram count //size (2**numBits) __shared__ int sh_count[]; //TODO: shared mem allocation //size is of num threads in block __shared__ int sh_tempArr[]; __shared__ int sh_tempPred[]; //copy input to tempArr //assuming total number of threads greater than number of elems if (globalThreadId < n) { tempArr[threadId] = d_InArr[globalThreadId]; } //initialize shared count to zero sh_tempArr[threadId] = 0; } __global__ void onChipPreSort(int *d_inArr, int n, int startBit, int numBits) { int i; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; //block's array to sort //TODO: __shared__ int sh_blockArr[]; __shared__ int sh_blockPred[]; __shared__ int sh_blockOut[]; //won't this cause thread divergence if (globalThreadId < n) { sh_blockArr[threadId] = d_inArr[globalThreadId]; for (i = 0; i < numBits; i++) { //TODO: BLOCKSIZE sortPerBit(startBit+i, BLOCKSIZE, sh_blockArr, sh_blockPred, sh_blockOut); sh_blockArr[threadId] = sh_blockOut[threadId]; } } //sh_blockOut is having final sorted output } //bitPos starts from 0, n -> block element count __device__ void sortPerBit(int bitPos, int n, int *sh_tempArr, int *sh_tempPred, int *sh_tempOut) { int i, key, totalFalses; __shared__ int lastPred; __shared__ int t[]; __shared__ int d[]; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + 
threadIdx.x; //set 1 for 0 at bitPos if (threadId < n) { sh_tempPred[threadId] = ((sh_tempArr[threadId]>>bitPos) & 1) == 0; } if (threadId == n-1) { //last thread for last element lastPred = sh_tempPred[threadId]; } //scan the 1's //TODO: set n preScan(sh_tempArr, sh_tempPred, n); totalFalses = sh_tempPred[n-1] + lastPred; //t = i - f + totalFalses if (threadId < n) { t[threadId] = threadId - sh_tempPred[threadId] + totalFalses; d[threadId] = ((sh_tempArr[threadId]>>bitPos) & 1) ? t[threadId] : sh_tempPred[threadId]; } //scater input using d as scatter address if (threadId < n) { sh_tempOut[d[threadId]] = sh_tempArr[threadId]; } } __device__ void preScan(int *arr, int *arrPred, int n) { int ai, bi; int thId = threadIdx.x; int d = 0, offset = 1; int temp; //build sum in place for (d = n>>1; d > 0; d >>=1) { __syncthreads(); if (thId < d) { ai = offset*(2*thId+1) - 1; bi = offset*(2*thId+2) - 1; arrPred[bi] += arrPred[ai]; } offset*=2; } //clear last element if (thId == 0) { arrPred[n-1] = 0; } //traverse down tree & build scan for (d = 1; d < n; d *=2) { offset >> = 1; __syncthreads(); if (thId < d) { ai = offset*(2*thId + 1) - 1; bi = offset*(2*thId + 2) - 1; temp = arrPred[ai]; arrPred[ai] = arrPred[bi]; arrPred[bi] += temp; } } __syncthreads(); } __device__ void sortStep(int stepNum, int numBits, int n, int *sh_tempArr, int *sh_count) { int i, key; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int mask = (1<<numBits) - 1; //get histogram of keys if (globalThreadId < n) { key = (sh_tempArr[threadId] >> stepNum*numBits) & mask; atomicAdd(&(sh_count[key]), 1); } // }
e5ed226bd55ecbce693b666ad62547f17affb23d.cu
#include <stdio.h> __device__ void radixSort(float *d_InArr, float *d_OutArr, int n, int numBits) { int i; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; //TODO: shared mem allocatiom //shared memory to store histogram count //size (2**numBits) __shared__ int sh_count[]; //TODO: shared mem allocation //size is of num threads in block __shared__ int sh_tempArr[]; __shared__ int sh_tempPred[]; //copy input to tempArr //assuming total number of threads greater than number of elems if (globalThreadId < n) { tempArr[threadId] = d_InArr[globalThreadId]; } //initialize shared count to zero sh_tempArr[threadId] = 0; } __global__ void onChipPreSort(int *d_inArr, int n, int startBit, int numBits) { int i; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; //block's array to sort //TODO: __shared__ int sh_blockArr[]; __shared__ int sh_blockPred[]; __shared__ int sh_blockOut[]; //won't this cause thread divergence if (globalThreadId < n) { sh_blockArr[threadId] = d_inArr[globalThreadId]; for (i = 0; i < numBits; i++) { //TODO: BLOCKSIZE sortPerBit(startBit+i, BLOCKSIZE, sh_blockArr, sh_blockPred, sh_blockOut); sh_blockArr[threadId] = sh_blockOut[threadId]; } } //sh_blockOut is having final sorted output } //bitPos starts from 0, n -> block element count __device__ void sortPerBit(int bitPos, int n, int *sh_tempArr, int *sh_tempPred, int *sh_tempOut) { int i, key, totalFalses; __shared__ int lastPred; __shared__ int t[]; __shared__ int d[]; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; //set 1 for 0 at bitPos if (threadId < n) { sh_tempPred[threadId] = 
((sh_tempArr[threadId]>>bitPos) & 1) == 0; } if (threadId == n-1) { //last thread for last element lastPred = sh_tempPred[threadId]; } //scan the 1's //TODO: set n preScan(sh_tempArr, sh_tempPred, n); totalFalses = sh_tempPred[n-1] + lastPred; //t = i - f + totalFalses if (threadId < n) { t[threadId] = threadId - sh_tempPred[threadId] + totalFalses; d[threadId] = ((sh_tempArr[threadId]>>bitPos) & 1) ? t[threadId] : sh_tempPred[threadId]; } //scater input using d as scatter address if (threadId < n) { sh_tempOut[d[threadId]] = sh_tempArr[threadId]; } } __device__ void preScan(int *arr, int *arrPred, int n) { int ai, bi; int thId = threadIdx.x; int d = 0, offset = 1; int temp; //build sum in place for (d = n>>1; d > 0; d >>=1) { __syncthreads(); if (thId < d) { ai = offset*(2*thId+1) - 1; bi = offset*(2*thId+2) - 1; arrPred[bi] += arrPred[ai]; } offset*=2; } //clear last element if (thId == 0) { arrPred[n-1] = 0; } //traverse down tree & build scan for (d = 1; d < n; d *=2) { offset >> = 1; __syncthreads(); if (thId < d) { ai = offset*(2*thId + 1) - 1; bi = offset*(2*thId + 2) - 1; temp = arrPred[ai]; arrPred[ai] = arrPred[bi]; arrPred[bi] += temp; } } __syncthreads(); } __device__ void sortStep(int stepNum, int numBits, int n, int *sh_tempArr, int *sh_count) { int i, key; //get current block number int blockId = blockIdx.x; //thread id within a block int threadId = threadIdx.x; //global thread id int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int mask = (1<<numBits) - 1; //get histogram of keys if (globalThreadId < n) { key = (sh_tempArr[threadId] >> stepNum*numBits) & mask; atomicAdd(&(sh_count[key]), 1); } // }
7e007e1c0ba1cb78bf25435a3ed4bc904cb3bcf4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <glew.h> #include <freeglut.h> #include <cudaDefs.h> #include <imageManager.h> #include "../cudamem.h" // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include "imageKernels.cuh" #include "../cudautil.cuh" #include "../opengl/CoreHeaders/shader.h" #include "../opengl/CoreHeaders/shaderProgram.h" #include "../opengl/code/uniform.h" #define BLOCK_DIM 8 struct Particle { float life; }; //CUDA variables static unsigned int imageWidth; static unsigned int imageHeight; static unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit static unsigned int imagePitch; static cudaGraphicsResource_t cudaPBOResource; static cudaGraphicsResource_t cudaTexResource; texture<uchar4, 2, hipReadModeElementType> cudaTexRef; static hipChannelFormatDesc cudaTexChannelDesc; static KernelSetting ks; static unsigned char someValue = 0; //OpenGL static unsigned int pboID; static unsigned int textureID; static unsigned int vaoID; static unsigned int vboID; static ShaderProgram* program; static unsigned int viewportWidth = 1024; static unsigned int viewportHeight = 1024; static unsigned int mouseX = 0; static unsigned int mouseY = 0; static Particle* particleDev = nullptr; #define MAX_LIFE (30.0f) #define RADIUS (200.0f) #pragma region CUDA Routines __global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo) { //TODO 9: 
Create a filter that replaces Red spectrum of RGBA pbo such that RED=someValue int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uchar4* ptr = (uchar4*) pbo; uchar4 value = tex2D(cudaTexRef, x, y); ptr[y * pboWidth + x] = make_uchar4(someValue, value.y, value.z, value.w); } __global__ void sphere(const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo, int mouseX, int mouseY, Particle* particles) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uchar4* ptr = (uchar4*)pbo; //uchar4 value = tex2D(cudaTexRef, x, y); mouseX = (mouseX / 1024.0) * pboWidth; mouseY = (mouseY / 1024.0) * pboHeight; uchar4 color = make_uchar4(0, 0, 0, 0); float life = particles[y * pboWidth + x].life; float2 centerPos = make_float2(pboHeight - mouseY, mouseX); float2 pos = make_float2(y, x); if (length(pos - centerPos) < RADIUS) { color = make_uchar4(255, 0, 0, 255); life = MAX_LIFE; } /*else if (abs(pos.y - centerPos.y) < RADIUS && pos.x > centerPos.x && abs(pos.x - centerPos.x) < 50.0f) { life = MAX_LIFE / 2.0f; }*/ else if (life > 0.0f) { color = make_uchar4((life / MAX_LIFE) * 255, 0, 0, 255); } ptr[y * pboWidth + x] = color; particles[y * pboWidth + x].life = max(0.0f, life - 1.0f); } void cudaWorker() { hipArray* array; //TODO 3: Map cudaTexResource hipGraphicsMapResources(1, &cudaTexResource, 0); //TODO 4: Get Mapped Array of cudaTexResource hipGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0); //TODO 5: Get cudaTexChannelDesc from previously obtained array hipGetChannelDesc(&cudaTexChannelDesc, array); //TODO 6: Binf cudaTexRef to array hipBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc); checkError(); unsigned char *pboData; size_t pboSize; //TODO 7: Map cudaPBOResource hipGraphicsMapResources(1, &cudaPBOResource, 0); checkError(); //TODO 7: Map Mapped pointer to cudaPBOResource data hipGraphicsResourceGetMappedPointer((void**) &pboData, 
&pboSize, cudaPBOResource); checkError(); //TODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.dimGrid = dim3(getNumberOfParts(imageWidth, BLOCK_DIM), getNumberOfParts(imageHeight, BLOCK_DIM), 1); //Calling applyFileter kernel /*someValue++; if (someValue>255) someValue = 0; applyFilter<<<ks.dimGrid, ks.dimBlock>>>(someValue, imageWidth, imageHeight, pboData);*/ sphere <<< ks.dimGrid, ks.dimBlock >> > (imageWidth, imageHeight, pboData, mouseX, mouseY, particleDev); //Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code! hipUnbindTexture(&cudaTexRef); hipGraphicsUnmapResources(1, &cudaPBOResource, 0); hipGraphicsUnmapResources(1, &cudaTexResource, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboID); glBindTexture( GL_TEXTURE_2D, textureID); glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory } void initCUDAtex() { hipGLSetGLDevice(0); checkError(); //CUDA Texture settings cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates cudaTexRef.filterMode = hipFilterModePoint; //Otherwise texRef.filterMode = hipFilterModeLinear; for Linear interpolation of texels cudaTexRef.addressMode[0] = hipAddressModeClamp; //No repeat texture pattern cudaTexRef.addressMode[1] = hipAddressModeClamp; //No repeat texture pattern //TODO 1: Register OpenGL texture to CUDA resource hipGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, hipGraphicsMapFlagsReadOnly); checkError(); //TODO 2: Register PBO to CUDA resource hipGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, hipGraphicsRegisterFlagsWriteDiscard); checkError(); glGenVertexArrays(1, &vaoID); glBindVertexArray(vaoID); glGenBuffers(1, &vboID); 
glBindBuffer(GL_ARRAY_BUFFER, vboID); glEnableVertexAttribArray(0); // position glEnableVertexAttribArray(1); // tex coords. glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), 0); glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (GLvoid*)(3 * sizeof(float))); GLfloat vertices[] = { -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // left bottom 1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // right bottom 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // right up -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // left bottom 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // right up -1.0f, 1.0f, 0.0f, 0.0f, 1.0f // left up }; glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); //Unbind glBindVertexArray(0); glBindBuffer(GL_ARRAY_BUFFER, 0); //std::string vertexCode = loadFile("opengl/Resources/shaders/screen_v3_t2.vert"); //std::string fragmentCode = loadFile("opengl/Resources/shaders/screen_v3_t2.frag"); Shader vertexShader(GL_VERTEX_SHADER); vertexShader.openFromFile("opengl/Resources/shaders/screen_v3_t2.vert"); Shader fragmentShader(GL_FRAGMENT_SHADER); fragmentShader.openFromFile("opengl/Resources/shaders/screen_v3_t2.frag"); program = new ShaderProgram(&vertexShader, &fragmentShader); program->enable(); Uniform<int>::bind("texSampler", program->m_programObject, 0); } void releaseCUDA() { hipGraphicsUnregisterResource(cudaPBOResource); hipGraphicsUnregisterResource(cudaTexResource); } #pragma endregion #pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!! void loadTexture(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); //OpenGL Texture glEnable(GL_TEXTURE_2D); glGenTextures(1,&textureID); glBindTexture( GL_TEXTURE_2D, textureID); //WARNING: Just some of inner format are supported by CUDA!!! 
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp)); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); FreeImage_Unload(tmp); } void preparePBO() { glGenBuffers(1, &pboID); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based) glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL,GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image } void my_display() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, textureID); //I know this is a very old OpenGL, but we want to practice CUDA :-) //Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2). 
/*glBegin(GL_QUADS); glTexCoord2d(0,0); glVertex2d(0,0); glTexCoord2d(1,0); glVertex2d(viewportWidth, 0); glTexCoord2d(1,1); glVertex2d(viewportWidth, viewportHeight); glTexCoord2d(0,1); glVertex2d(0, viewportHeight); glEnd();*/ glBindVertexArray(vaoID); glDrawArrays(GL_TRIANGLES, 0, 6); glBindVertexArray(0); glDisable(GL_TEXTURE_2D); glFlush(); glutSwapBuffers(); } void my_resize(GLsizei w, GLsizei h) { viewportWidth=w; viewportHeight=h; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glViewport(0,0,viewportWidth,viewportHeight); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0,viewportWidth, 0,viewportHeight); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } void my_idle() { cudaWorker(); glutPostRedisplay(); } void initGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(viewportWidth,viewportHeight); glutInitWindowPosition(0,0); glutCreateWindow(":-)"); glutDisplayFunc(my_display); glutReshapeFunc(my_resize); glutIdleFunc(my_idle); glutSetCursor(GLUT_CURSOR_CROSSHAIR); // initialize necessary OpenGL extensions glewInit(); glClearColor(0.0, 0.0, 0.0, 1.0); glShadeModel(GL_SMOOTH); glDisable(GL_CULL_FACE); glViewport(0,0,viewportWidth,viewportHeight); glFlush(); } void releaseOpenGL() { if (textureID > 0) glDeleteTextures(1, &textureID); if (pboID > 0) glDeleteBuffers(1, &pboID); } #pragma endregion void releaseResources() { releaseCUDA(); releaseOpenGL(); } static void mouseMove(int x, int y) { mouseX = x; mouseY = y; } void cviko7(int argc, char** argv) { initGL(argc, argv); loadTexture("graphics/lena.png"); preparePBO(); initCUDAtex(); //start rendering mainloop glutMotionFunc(mouseMove); Particle* particles = new Particle[imageWidth * imageHeight]; memset(particles, 0, sizeof(Particle) * imageWidth * imageHeight); CudaMemory<Particle> cudaParticles(imageWidth * imageHeight, particles); particleDev = cudaParticles.device(); while (true) { cudaWorker(); 
glutMainLoopEvent(); glutPostRedisplay(); } atexit(releaseResources); }
7e007e1c0ba1cb78bf25435a3ed4bc904cb3bcf4.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <glew.h> #include <freeglut.h> #include <cudaDefs.h> #include <imageManager.h> #include "../cudamem.h" // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include "imageKernels.cuh" #include "../cudautil.cuh" #include "../opengl/CoreHeaders/shader.h" #include "../opengl/CoreHeaders/shaderProgram.h" #include "../opengl/code/uniform.h" #define BLOCK_DIM 8 struct Particle { float life; }; //CUDA variables static unsigned int imageWidth; static unsigned int imageHeight; static unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit static unsigned int imagePitch; static cudaGraphicsResource_t cudaPBOResource; static cudaGraphicsResource_t cudaTexResource; texture<uchar4, 2, cudaReadModeElementType> cudaTexRef; static cudaChannelFormatDesc cudaTexChannelDesc; static KernelSetting ks; static unsigned char someValue = 0; //OpenGL static unsigned int pboID; static unsigned int textureID; static unsigned int vaoID; static unsigned int vboID; static ShaderProgram* program; static unsigned int viewportWidth = 1024; static unsigned int viewportHeight = 1024; static unsigned int mouseX = 0; static unsigned int mouseY = 0; static Particle* particleDev = nullptr; #define MAX_LIFE (30.0f) #define RADIUS (200.0f) #pragma region CUDA Routines __global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo) { //TODO 9: Create a filter that replaces Red spectrum of RGBA pbo such that 
RED=someValue int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uchar4* ptr = (uchar4*) pbo; uchar4 value = tex2D(cudaTexRef, x, y); ptr[y * pboWidth + x] = make_uchar4(someValue, value.y, value.z, value.w); } __global__ void sphere(const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo, int mouseX, int mouseY, Particle* particles) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uchar4* ptr = (uchar4*)pbo; //uchar4 value = tex2D(cudaTexRef, x, y); mouseX = (mouseX / 1024.0) * pboWidth; mouseY = (mouseY / 1024.0) * pboHeight; uchar4 color = make_uchar4(0, 0, 0, 0); float life = particles[y * pboWidth + x].life; float2 centerPos = make_float2(pboHeight - mouseY, mouseX); float2 pos = make_float2(y, x); if (length(pos - centerPos) < RADIUS) { color = make_uchar4(255, 0, 0, 255); life = MAX_LIFE; } /*else if (abs(pos.y - centerPos.y) < RADIUS && pos.x > centerPos.x && abs(pos.x - centerPos.x) < 50.0f) { life = MAX_LIFE / 2.0f; }*/ else if (life > 0.0f) { color = make_uchar4((life / MAX_LIFE) * 255, 0, 0, 255); } ptr[y * pboWidth + x] = color; particles[y * pboWidth + x].life = max(0.0f, life - 1.0f); } void cudaWorker() { cudaArray* array; //TODO 3: Map cudaTexResource cudaGraphicsMapResources(1, &cudaTexResource, 0); //TODO 4: Get Mapped Array of cudaTexResource cudaGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0); //TODO 5: Get cudaTexChannelDesc from previously obtained array cudaGetChannelDesc(&cudaTexChannelDesc, array); //TODO 6: Binf cudaTexRef to array cudaBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc); checkError(); unsigned char *pboData; size_t pboSize; //TODO 7: Map cudaPBOResource cudaGraphicsMapResources(1, &cudaPBOResource, 0); checkError(); //TODO 7: Map Mapped pointer to cudaPBOResource data cudaGraphicsResourceGetMappedPointer((void**) &pboData, &pboSize, cudaPBOResource); checkError(); //TODO 8: Set 
KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.dimGrid = dim3(getNumberOfParts(imageWidth, BLOCK_DIM), getNumberOfParts(imageHeight, BLOCK_DIM), 1); //Calling applyFileter kernel /*someValue++; if (someValue>255) someValue = 0; applyFilter<<<ks.dimGrid, ks.dimBlock>>>(someValue, imageWidth, imageHeight, pboData);*/ sphere <<< ks.dimGrid, ks.dimBlock >> > (imageWidth, imageHeight, pboData, mouseX, mouseY, particleDev); //Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code! cudaUnbindTexture(&cudaTexRef); cudaGraphicsUnmapResources(1, &cudaPBOResource, 0); cudaGraphicsUnmapResources(1, &cudaTexResource, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboID); glBindTexture( GL_TEXTURE_2D, textureID); glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory } void initCUDAtex() { cudaGLSetGLDevice(0); checkError(); //CUDA Texture settings cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates cudaTexRef.filterMode = cudaFilterModePoint; //Otherwise texRef.filterMode = cudaFilterModeLinear; for Linear interpolation of texels cudaTexRef.addressMode[0] = cudaAddressModeClamp; //No repeat texture pattern cudaTexRef.addressMode[1] = cudaAddressModeClamp; //No repeat texture pattern //TODO 1: Register OpenGL texture to CUDA resource cudaGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly); checkError(); //TODO 2: Register PBO to CUDA resource cudaGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, cudaGraphicsRegisterFlagsWriteDiscard); checkError(); glGenVertexArrays(1, &vaoID); glBindVertexArray(vaoID); glGenBuffers(1, &vboID); glBindBuffer(GL_ARRAY_BUFFER, vboID); 
glEnableVertexAttribArray(0); // position glEnableVertexAttribArray(1); // tex coords. glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), 0); glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (GLvoid*)(3 * sizeof(float))); GLfloat vertices[] = { -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // left bottom 1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // right bottom 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // right up -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // left bottom 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // right up -1.0f, 1.0f, 0.0f, 0.0f, 1.0f // left up }; glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); //Unbind glBindVertexArray(0); glBindBuffer(GL_ARRAY_BUFFER, 0); //std::string vertexCode = loadFile("opengl/Resources/shaders/screen_v3_t2.vert"); //std::string fragmentCode = loadFile("opengl/Resources/shaders/screen_v3_t2.frag"); Shader vertexShader(GL_VERTEX_SHADER); vertexShader.openFromFile("opengl/Resources/shaders/screen_v3_t2.vert"); Shader fragmentShader(GL_FRAGMENT_SHADER); fragmentShader.openFromFile("opengl/Resources/shaders/screen_v3_t2.frag"); program = new ShaderProgram(&vertexShader, &fragmentShader); program->enable(); Uniform<int>::bind("texSampler", program->m_programObject, 0); } void releaseCUDA() { cudaGraphicsUnregisterResource(cudaPBOResource); cudaGraphicsUnregisterResource(cudaTexResource); } #pragma endregion #pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!! void loadTexture(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); //OpenGL Texture glEnable(GL_TEXTURE_2D); glGenTextures(1,&textureID); glBindTexture( GL_TEXTURE_2D, textureID); //WARNING: Just some of inner format are supported by CUDA!!! 
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp)); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); FreeImage_Unload(tmp); } void preparePBO() { glGenBuffers(1, &pboID); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based) glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL,GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image } void my_display() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, textureID); //I know this is a very old OpenGL, but we want to practice CUDA :-) //Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2). 
/*glBegin(GL_QUADS); glTexCoord2d(0,0); glVertex2d(0,0); glTexCoord2d(1,0); glVertex2d(viewportWidth, 0); glTexCoord2d(1,1); glVertex2d(viewportWidth, viewportHeight); glTexCoord2d(0,1); glVertex2d(0, viewportHeight); glEnd();*/ glBindVertexArray(vaoID); glDrawArrays(GL_TRIANGLES, 0, 6); glBindVertexArray(0); glDisable(GL_TEXTURE_2D); glFlush(); glutSwapBuffers(); } void my_resize(GLsizei w, GLsizei h) { viewportWidth=w; viewportHeight=h; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glViewport(0,0,viewportWidth,viewportHeight); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0,viewportWidth, 0,viewportHeight); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } void my_idle() { cudaWorker(); glutPostRedisplay(); } void initGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(viewportWidth,viewportHeight); glutInitWindowPosition(0,0); glutCreateWindow(":-)"); glutDisplayFunc(my_display); glutReshapeFunc(my_resize); glutIdleFunc(my_idle); glutSetCursor(GLUT_CURSOR_CROSSHAIR); // initialize necessary OpenGL extensions glewInit(); glClearColor(0.0, 0.0, 0.0, 1.0); glShadeModel(GL_SMOOTH); glDisable(GL_CULL_FACE); glViewport(0,0,viewportWidth,viewportHeight); glFlush(); } void releaseOpenGL() { if (textureID > 0) glDeleteTextures(1, &textureID); if (pboID > 0) glDeleteBuffers(1, &pboID); } #pragma endregion void releaseResources() { releaseCUDA(); releaseOpenGL(); } static void mouseMove(int x, int y) { mouseX = x; mouseY = y; } void cviko7(int argc, char** argv) { initGL(argc, argv); loadTexture("graphics/lena.png"); preparePBO(); initCUDAtex(); //start rendering mainloop glutMotionFunc(mouseMove); Particle* particles = new Particle[imageWidth * imageHeight]; memset(particles, 0, sizeof(Particle) * imageWidth * imageHeight); CudaMemory<Particle> cudaParticles(imageWidth * imageHeight, particles); particleDev = cudaParticles.device(); while (true) { cudaWorker(); 
glutMainLoopEvent(); glutPostRedisplay(); } atexit(releaseResources); }
4a8264726d7f0e16af5ce3ff9985786c84dd65e9.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> #include <iostream> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include "GpuTimer.h" #include <time.h> using namespace cv; using namespace std; #define FILTER_SIZE 11 #define BLOCK_SIZE 16 // imgBlurGPU blurs an image on the GPU __global__ void imgBlurGPU(unsigned char* outImg, unsigned char* inImg, int width, int height) { int filterRow, filterCol; int cornerRow, cornerCol; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int filterSize = 2*FILTER_SIZE + 1; // compute global thread coordinates int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; // make sure thread is within image boundaries if ((row < height) && (col < width)) { // instantiate accumulator int numPixels = 0; int cumSum = 0; // top-left corner coordinates cornerRow = row - FILTER_SIZE; cornerCol = col - FILTER_SIZE; // accumulate values inside filter for (int i = 0; i < filterSize; i++) { for (int j = 0; j < filterSize; j++) { // filter coordinates filterRow = cornerRow + i; filterCol = cornerCol + j; // accumulate sum if ((filterRow >= 0) && (filterRow <= height) && (filterCol >= 0) && (filterCol <= width)) { cumSum += inImg[filterRow*width + filterCol]; numPixels++; } } } // set the value of output outImg[row*width + col] = (unsigned char)(cumSum / numPixels); } } __global__ void blurKernel(unsingned char* in, unsigned char* out, int w, int h){ int Col = blockIdx.x * blockDim.x + ThreadIdx.x; int Row = blockIdx.y * blockDim.y + ThreadIdx.y; if (Col < w && Row < h){ int pixVal = 0; int pixels = 0; for(int blurRow= -BLUR_SIZE; blurRow < BLUR_SIZE +1; ++blurRow) for(int blurCol= -BLUR_SIZE; blurCol < BLUR_SIZE +1; ++blurCol){ int curRow = row + blurRow; int curCol = col + blurCol; if (curRow > -1 && curRow < h && curCol >-1 && 
curCol < w){ pixVal += in[curRow*w + curCol]; pixels ++; } } } out[ Row*w +Col ] = (unsigned char) (pixelVal/ pixels); } int main(int argc, char *argv[]) { // make sure filename given if (argc == 1) { printf("[!] Filename expected.\n"); return 0; } // read image Mat img; img = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); if (img.empty()) { printf("Cannot read image file %s", argv[1]); exit(1); } // define img params and timers int imgWidth = img.cols; int imgHeight = img.rows; size_t imgSize = sizeof(unsigned char)*imgWidth*imgHeight; GpuTimer timer; // allocate mem for host output image vectors unsigned char* h_outImg = (unsigned char*)malloc(imgSize); unsigned char* h_outImg_CPU = (unsigned char*)malloc(imgSize); // grab pointer to host input image unsigned char* h_inImg = img.data; // allocate mem for device input and output unsigned char* d_inImg; unsigned char* d_outImg; hipMalloc((void**)&d_inImg, imgSize); hipMalloc((void**)&d_outImg, imgSize); // copy the input image from the host to the device and record the needed time hipMemcpy(d_inImg, h_inImg, imgSize, hipMemcpyHostToDevice); // execution configuration parameters + kernel launch dim3 dimBlock(16, 16, 1); dim3 dimGrid(ceil(imgWidth/16.0), ceil(imgHeight/16.0), 1); timer.Start(); //imgBlurGPU<<<dimGrid, dimBlock>>>(d_outImg, d_inImg, imgWidth, imgHeight); hipLaunchKernelGGL(( blurKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_outImg, d_inImg, imgWidth, imgHeight); timer.Stop(); float d_t2 = timer.Elapsed(); printf("Implemented CUDA code ran in: %f msecs.\n", d_t2); // copy output image from device to host hipMemcpy(h_outImg, d_outImg, imgSize, hipMemcpyDeviceToHost); // display images Mat img1(imgHeight, imgWidth, CV_8UC1, h_outImg); Mat img2(imgHeight, imgWidth, CV_8UC1, h_outImg_CPU); namedWindow("Before", WINDOW_NORMAL); imshow("Before", img); namedWindow("After (GPU)", WINDOW_NORMAL); imshow("After (GPU)", img1); namedWindow("After (CPU)", WINDOW_NORMAL); imshow("After (CPU)", img2); waitKey(0); // 
free host and device memory img.release(); img1.release(); img2.release(); free(h_outImg_CPU); free(h_outImg); hipFree(d_outImg); hipFree(d_inImg); return 0; }
4a8264726d7f0e16af5ce3ff9985786c84dd65e9.cu
#include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> #include <iostream> #include <cuda_runtime.h> #include "device_launch_parameters.h" #include "GpuTimer.h" #include <time.h> using namespace cv; using namespace std; #define FILTER_SIZE 11 #define BLOCK_SIZE 16 // imgBlurGPU blurs an image on the GPU __global__ void imgBlurGPU(unsigned char* outImg, unsigned char* inImg, int width, int height) { int filterRow, filterCol; int cornerRow, cornerCol; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int filterSize = 2*FILTER_SIZE + 1; // compute global thread coordinates int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; // make sure thread is within image boundaries if ((row < height) && (col < width)) { // instantiate accumulator int numPixels = 0; int cumSum = 0; // top-left corner coordinates cornerRow = row - FILTER_SIZE; cornerCol = col - FILTER_SIZE; // accumulate values inside filter for (int i = 0; i < filterSize; i++) { for (int j = 0; j < filterSize; j++) { // filter coordinates filterRow = cornerRow + i; filterCol = cornerCol + j; // accumulate sum if ((filterRow >= 0) && (filterRow <= height) && (filterCol >= 0) && (filterCol <= width)) { cumSum += inImg[filterRow*width + filterCol]; numPixels++; } } } // set the value of output outImg[row*width + col] = (unsigned char)(cumSum / numPixels); } } __global__ void blurKernel(unsingned char* in, unsigned char* out, int w, int h){ int Col = blockIdx.x * blockDim.x + ThreadIdx.x; int Row = blockIdx.y * blockDim.y + ThreadIdx.y; if (Col < w && Row < h){ int pixVal = 0; int pixels = 0; for(int blurRow= -BLUR_SIZE; blurRow < BLUR_SIZE +1; ++blurRow) for(int blurCol= -BLUR_SIZE; blurCol < BLUR_SIZE +1; ++blurCol){ int curRow = row + blurRow; int curCol = col + blurCol; if (curRow > -1 && curRow < h && curCol >-1 && curCol < w){ pixVal += in[curRow*w + curCol]; pixels ++; } } } out[ 
Row*w +Col ] = (unsigned char) (pixelVal/ pixels); } int main(int argc, char *argv[]) { // make sure filename given if (argc == 1) { printf("[!] Filename expected.\n"); return 0; } // read image Mat img; img = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); if (img.empty()) { printf("Cannot read image file %s", argv[1]); exit(1); } // define img params and timers int imgWidth = img.cols; int imgHeight = img.rows; size_t imgSize = sizeof(unsigned char)*imgWidth*imgHeight; GpuTimer timer; // allocate mem for host output image vectors unsigned char* h_outImg = (unsigned char*)malloc(imgSize); unsigned char* h_outImg_CPU = (unsigned char*)malloc(imgSize); // grab pointer to host input image unsigned char* h_inImg = img.data; // allocate mem for device input and output unsigned char* d_inImg; unsigned char* d_outImg; cudaMalloc((void**)&d_inImg, imgSize); cudaMalloc((void**)&d_outImg, imgSize); // copy the input image from the host to the device and record the needed time cudaMemcpy(d_inImg, h_inImg, imgSize, cudaMemcpyHostToDevice); // execution configuration parameters + kernel launch dim3 dimBlock(16, 16, 1); dim3 dimGrid(ceil(imgWidth/16.0), ceil(imgHeight/16.0), 1); timer.Start(); //imgBlurGPU<<<dimGrid, dimBlock>>>(d_outImg, d_inImg, imgWidth, imgHeight); blurKernel<<<dimGrid, dimBlock>>>(d_outImg, d_inImg, imgWidth, imgHeight); timer.Stop(); float d_t2 = timer.Elapsed(); printf("Implemented CUDA code ran in: %f msecs.\n", d_t2); // copy output image from device to host cudaMemcpy(h_outImg, d_outImg, imgSize, cudaMemcpyDeviceToHost); // display images Mat img1(imgHeight, imgWidth, CV_8UC1, h_outImg); Mat img2(imgHeight, imgWidth, CV_8UC1, h_outImg_CPU); namedWindow("Before", WINDOW_NORMAL); imshow("Before", img); namedWindow("After (GPU)", WINDOW_NORMAL); imshow("After (GPU)", img1); namedWindow("After (CPU)", WINDOW_NORMAL); imshow("After (CPU)", img2); waitKey(0); // free host and device memory img.release(); img1.release(); img2.release(); free(h_outImg_CPU); 
free(h_outImg); cudaFree(d_outImg); cudaFree(d_inImg); return 0; }
2fccad0d11fed2046c84461327d9db306f002643.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* * A simple introduction to programming in CUDA. This program prints "Hello * World from GPU! from 10 CUDA threads running on the GPU. */ __global__ void helloFromGPU() { printf("Hello World from GPU!\n"); } int main(int argc, char **argv) { printf("Hello World from CPU!\n"); hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, ); hipDeviceReset(); return 0; }
2fccad0d11fed2046c84461327d9db306f002643.cu
#include <stdio.h> /* * A simple introduction to programming in CUDA. This program prints "Hello * World from GPU! from 10 CUDA threads running on the GPU. */ __global__ void helloFromGPU() { printf("Hello World from GPU!\n"); } int main(int argc, char **argv) { printf("Hello World from CPU!\n"); helloFromGPU<<<1, 10>>>(); cudaDeviceReset(); return 0; }
fa2b10c43186ad5e00f125cfabed00dbc2afc561.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"CudaHelper.cuh" #include"CudaInterface.hpp" typedef unsigned char uchar; #define MAX3(a, b, c) fmaxf(fmaxf(a,b),c) #define MIN3(a, b, c) fminf(fminf(a,b),c) #define UNFLOAT(n) ((n) >= 255 ? 255 : ((n) <= 0 ? 0 : uchar((n) + 0.5))) inline __device__ static void getLightest(uchar4& mc, uchar4& a, uchar4& b, uchar4& c, float strength) { mc = make_uchar4( mc.x + strength * (__fdividef(a.x + b.x + c.x, 3.0f) - mc.x) + 0.5f, mc.y + strength * (__fdividef(a.y + b.y + c.y, 3.0f) - mc.y) + 0.5f, mc.z + strength * (__fdividef(a.z + b.z + c.z, 3.0f) - mc.z) + 0.5f, mc.w + strength * (__fdividef(a.w + b.w + c.w, 3.0f) - mc.w) + 0.5f ); } inline __device__ static void getAVerage(uchar4& mc, uchar4& a, uchar4& b, uchar4& c, float strength) { mc = make_uchar4( mc.x + strength * (__fdividef(a.x + b.x + c.x, 3.0f) - mc.x) + 0.5f, mc.y + strength * (__fdividef(a.y + b.y + c.y, 3.0f) - mc.y) + 0.5f, mc.z + strength * (__fdividef(a.z + b.z + c.z, 3.0f) - mc.z) + 0.5f, 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f ); } __global__ static void getGray( hipTextureObject_t srcImg, hipSurfaceObject_t dstImg, int W, int H ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; float u = (x + 0.5f) / (float)(W); float v = (y + 0.5f) / (float)(H); float4 fmc = tex2D<float4>(srcImg, u, v); uchar4 mc = make_uchar4( fmc.x * 255.0f + 0.5f, fmc.y * 255.0f + 0.5f, fmc.z * 255.0f + 0.5f, fmc.w * 255.0f + 0.5f ); mc.w = 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f; surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); } __global__ static void pushColor( hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg, int W, int H, float strength ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) 
+ threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, hipBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, hipBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, hipBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, hipBoundaryModeZero); surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, hipBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, hipBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, hipBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, hipBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, hipBoundaryModeZero); uchar maxD, minL; //top and bottom maxD = MAX3(bl.w, bc.w, br.w); minL = MIN3(tl.w, tc.w, tr.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tl, tc, tr, strength); else { maxD = MAX3(tl.w, tc.w, tr.w); minL = MIN3(bl.w, bc.w, br.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, bl, bc, br, strength); } //sundiagonal maxD = MAX3(ml.w, mc.w, bc.w); minL = MIN3(tc.w, tr.w, mr.w); if (minL > maxD) getLightest(mc, tc, tr, mr, strength); else { maxD = MAX3(tc.w, mc.w, mr.w); minL = MIN3(ml.w, bl.w, bc.w); if (minL > maxD) getLightest(mc, ml, bl, bc, strength); } //left and right maxD = MAX3(tl.w, ml.w, bl.w); minL = MIN3(tr.w, mr.w, br.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tr, mr, br, strength); else { maxD = MAX3(tr.w, mr.w, br.w); minL = MIN3(tl.w, ml.w, bl.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tl, ml, bl, strength); } //diagonal maxD = MAX3(tc.w, mc.w, ml.w); minL = MIN3(mr.w, br.w, bc.w); if (minL > maxD) getLightest(mc, mr, br, bc, strength); else { maxD = MAX3(bc.w, mc.w, mr.w); minL = MIN3(ml.w, tl.w, tc.w); if (minL > maxD) getLightest(mc, ml, tl, tc, strength); } surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); } __global__ 
static void getGradient( hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg, int W, int H ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, hipBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, hipBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, hipBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, hipBoundaryModeZero); surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, hipBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, hipBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, hipBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, hipBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, hipBoundaryModeZero); const float gradX = tr.w + mr.w + mr.w + br.w - tl.w - ml.w - ml.w - bl.w; const float gradY = tl.w + tc.w + tc.w + tr.w - bl.w - bc.w - bc.w - br.w; const int grad = sqrtf(gradX * gradX + gradY * gradY); mc.w = (uchar)255 - UNFLOAT(grad); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); } __global__ static void pushGradient( hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg, int W, int H, float strength ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, hipBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, hipBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, hipBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, hipBoundaryModeZero); 
surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, hipBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, hipBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, hipBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, hipBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, hipBoundaryModeZero); uchar maxD, minL; //top and bottom maxD = MAX3(bl.w, bc.w, br.w); minL = MIN3(tl.w, tc.w, tr.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tl, tc, tr, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } maxD = MAX3(tl.w, tc.w, tr.w); minL = MIN3(bl.w, bc.w, br.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, bl, bc, br, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } //sundiagonal maxD = MAX3(ml.w, mc.w, bc.w); minL = MIN3(tc.w, tr.w, mr.w); if (minL > maxD) { getAVerage(mc, tc, tr, mr, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } maxD = MAX3(tc.w, mc.w, mr.w); minL = MIN3(ml.w, bl.w, bc.w); if (minL > maxD) { getAVerage(mc, ml, bl, bc, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } //left and right maxD = MAX3(tl.w, ml.w, bl.w); minL = MIN3(tr.w, mr.w, br.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tr, mr, br, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } maxD = MAX3(tr.w, mr.w, br.w); minL = MIN3(tl.w, ml.w, bl.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tl, ml, bl, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } //diagonal maxD = MAX3(tc.w, mc.w, ml.w); minL = MIN3(mr.w, br.w, bc.w); if (minL > maxD) { getAVerage(mc, mr, br, bc, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } maxD = MAX3(bc.w, mc.w, mr.w); minL = MIN3(ml.w, tl.w, tc.w); if (minL > maxD) { getAVerage(mc, ml, tl, tc, 
strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); return; } mc.w = 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f; surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, hipBoundaryModeZero); } void cuRunKernelAnime4K09(const unsigned char* inputData, unsigned char* outputData, ACCudaParamAnime4K09 * param) { hipError_t err = hipSuccess; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar4>(); hipArray_t cuArray0; err = hipMallocArray(&cuArray0, &channelDesc, param->orgW, param->orgH); CheckCudaErr(err); hipArray_t cuArray1; err = hipMallocArray(&cuArray1, &channelDesc, param->W, param->H, hipArraySurfaceLoadStore); CheckCudaErr(err); hipArray_t cuArray2; err = hipMallocArray(&cuArray2, &channelDesc, param->W, param->H, hipArraySurfaceLoadStore); CheckCudaErr(err); hipArray_t cuArray3; err = hipMallocArray(&cuArray3, &channelDesc, param->W, param->H, hipArraySurfaceLoadStore); CheckCudaErr(err); struct hipResourceDesc resDesc; struct hipTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeBorder; texDesc.addressMode[1] = hipAddressModeBorder; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeNormalizedFloat; texDesc.normalizedCoords = 1; resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cuArray0; hipTextureObject_t tex = 0; err = hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL); CheckCudaErr(err); resDesc.res.array.array = cuArray1; hipSurfaceObject_t surf1 = 0; err = hipCreateSurfaceObject(&surf1, &resDesc); CheckCudaErr(err); resDesc.res.array.array = cuArray2; hipSurfaceObject_t surf2 = 0; err = hipCreateSurfaceObject(&surf2, &resDesc); CheckCudaErr(err); resDesc.res.array.array = cuArray3; hipSurfaceObject_t surf3 = 0; err = hipCreateSurfaceObject(&surf3, &resDesc); CheckCudaErr(err); err = hipMemcpy2DToArray(cuArray0, 0, 0, inputData, sizeof(uchar4) * param->orgW, sizeof(uchar4) * param->orgW, param->orgH, 
hipMemcpyHostToDevice); CheckCudaErr(err); dim3 dimBlock(16, 16); dim3 dimGrid( (param->W + dimBlock.x - 1) / dimBlock.x, (param->H + dimBlock.y - 1) / dimBlock.y ); { int i; hipLaunchKernelGGL(( getGray) , dim3(dimGrid), dim3(dimBlock), 0, 0, tex, surf1, param->W, param->H); for (i = 0; i < param->passes && i < param->pushColorCount; i++) { hipLaunchKernelGGL(( pushColor) , dim3(dimGrid), dim3(dimBlock), 0, 0, surf1, surf2, param->W, param->H, param->strengthColor); hipLaunchKernelGGL(( getGradient) , dim3(dimGrid), dim3(dimBlock), 0, 0, surf2, surf3, param->W, param->H); hipLaunchKernelGGL(( pushGradient) , dim3(dimGrid), dim3(dimBlock), 0, 0, surf3, surf1, param->W, param->H, param->strengthGradient); } while (i++ < param->passes) { hipLaunchKernelGGL(( getGradient) , dim3(dimGrid), dim3(dimBlock), 0, 0, surf1, surf2, param->W, param->H); hipLaunchKernelGGL(( pushGradient) , dim3(dimGrid), dim3(dimBlock), 0, 0, surf2, surf1, param->W, param->H, param->strengthGradient); } } err = hipMemcpy2DFromArray(outputData, sizeof(uchar4) * param->W, cuArray1, 0, 0, sizeof(uchar4) * param->W, param->H, hipMemcpyDeviceToHost); CheckCudaErr(err); hipDestroyTextureObject(tex); hipDestroySurfaceObject(surf1); hipDestroySurfaceObject(surf2); hipDestroySurfaceObject(surf3); hipFreeArray(cuArray0); hipFreeArray(cuArray1); hipFreeArray(cuArray2); hipFreeArray(cuArray3); }
fa2b10c43186ad5e00f125cfabed00dbc2afc561.cu
#include"CudaHelper.cuh" #include"CudaInterface.hpp" typedef unsigned char uchar; #define MAX3(a, b, c) fmaxf(fmaxf(a,b),c) #define MIN3(a, b, c) fminf(fminf(a,b),c) #define UNFLOAT(n) ((n) >= 255 ? 255 : ((n) <= 0 ? 0 : uchar((n) + 0.5))) inline __device__ static void getLightest(uchar4& mc, uchar4& a, uchar4& b, uchar4& c, float strength) { mc = make_uchar4( mc.x + strength * (__fdividef(a.x + b.x + c.x, 3.0f) - mc.x) + 0.5f, mc.y + strength * (__fdividef(a.y + b.y + c.y, 3.0f) - mc.y) + 0.5f, mc.z + strength * (__fdividef(a.z + b.z + c.z, 3.0f) - mc.z) + 0.5f, mc.w + strength * (__fdividef(a.w + b.w + c.w, 3.0f) - mc.w) + 0.5f ); } inline __device__ static void getAVerage(uchar4& mc, uchar4& a, uchar4& b, uchar4& c, float strength) { mc = make_uchar4( mc.x + strength * (__fdividef(a.x + b.x + c.x, 3.0f) - mc.x) + 0.5f, mc.y + strength * (__fdividef(a.y + b.y + c.y, 3.0f) - mc.y) + 0.5f, mc.z + strength * (__fdividef(a.z + b.z + c.z, 3.0f) - mc.z) + 0.5f, 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f ); } __global__ static void getGray( cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg, int W, int H ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; float u = (x + 0.5f) / (float)(W); float v = (y + 0.5f) / (float)(H); float4 fmc = tex2D<float4>(srcImg, u, v); uchar4 mc = make_uchar4( fmc.x * 255.0f + 0.5f, fmc.y * 255.0f + 0.5f, fmc.z * 255.0f + 0.5f, fmc.w * 255.0f + 0.5f ); mc.w = 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f; surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); } __global__ static void pushColor( cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg, int W, int H, float strength ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, 
bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, cudaBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, cudaBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, cudaBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, cudaBoundaryModeZero); surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, cudaBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, cudaBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, cudaBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, cudaBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, cudaBoundaryModeZero); uchar maxD, minL; //top and bottom maxD = MAX3(bl.w, bc.w, br.w); minL = MIN3(tl.w, tc.w, tr.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tl, tc, tr, strength); else { maxD = MAX3(tl.w, tc.w, tr.w); minL = MIN3(bl.w, bc.w, br.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, bl, bc, br, strength); } //sundiagonal maxD = MAX3(ml.w, mc.w, bc.w); minL = MIN3(tc.w, tr.w, mr.w); if (minL > maxD) getLightest(mc, tc, tr, mr, strength); else { maxD = MAX3(tc.w, mc.w, mr.w); minL = MIN3(ml.w, bl.w, bc.w); if (minL > maxD) getLightest(mc, ml, bl, bc, strength); } //left and right maxD = MAX3(tl.w, ml.w, bl.w); minL = MIN3(tr.w, mr.w, br.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tr, mr, br, strength); else { maxD = MAX3(tr.w, mr.w, br.w); minL = MIN3(tl.w, ml.w, bl.w); if (minL > mc.w && mc.w > maxD) getLightest(mc, tl, ml, bl, strength); } //diagonal maxD = MAX3(tc.w, mc.w, ml.w); minL = MIN3(mr.w, br.w, bc.w); if (minL > maxD) getLightest(mc, mr, br, bc, strength); else { maxD = MAX3(bc.w, mc.w, mr.w); minL = MIN3(ml.w, tl.w, tc.w); if (minL > maxD) getLightest(mc, ml, tl, tc, strength); } surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); } __global__ static void getGradient( cudaSurfaceObject_t srcImg, cudaSurfaceObject_t 
dstImg, int W, int H ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, cudaBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, cudaBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, cudaBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, cudaBoundaryModeZero); surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, cudaBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, cudaBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, cudaBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, cudaBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, cudaBoundaryModeZero); const float gradX = tr.w + mr.w + mr.w + br.w - tl.w - ml.w - ml.w - bl.w; const float gradY = tl.w + tc.w + tc.w + tr.w - bl.w - bc.w - bc.w - br.w; const int grad = sqrtf(gradX * gradX + gradY * gradY); mc.w = (uchar)255 - UNFLOAT(grad); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); } __global__ static void pushGradient( cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg, int W, int H, float strength ) { const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if (x >= W || y >= H) return; uchar4 tl, tc, tr, ml, mc, mr, bl, bc, br; surf2Dread(&tl, srcImg, __umul24(sizeof(mc), x - 1), y - 1, cudaBoundaryModeZero); surf2Dread(&tc, srcImg, __umul24(sizeof(mc), x), y - 1, cudaBoundaryModeZero); surf2Dread(&tr, srcImg, __umul24(sizeof(mc), x + 1), y - 1, cudaBoundaryModeZero); surf2Dread(&ml, srcImg, __umul24(sizeof(mc), x - 1), y, cudaBoundaryModeZero); surf2Dread(&mc, srcImg, __umul24(sizeof(mc), x), y, 
cudaBoundaryModeZero); surf2Dread(&mr, srcImg, __umul24(sizeof(mc), x + 1), y, cudaBoundaryModeZero); surf2Dread(&bl, srcImg, __umul24(sizeof(mc), x - 1), y + 1, cudaBoundaryModeZero); surf2Dread(&bc, srcImg, __umul24(sizeof(mc), x), y + 1, cudaBoundaryModeZero); surf2Dread(&br, srcImg, __umul24(sizeof(mc), x + 1), y + 1, cudaBoundaryModeZero); uchar maxD, minL; //top and bottom maxD = MAX3(bl.w, bc.w, br.w); minL = MIN3(tl.w, tc.w, tr.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tl, tc, tr, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } maxD = MAX3(tl.w, tc.w, tr.w); minL = MIN3(bl.w, bc.w, br.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, bl, bc, br, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } //sundiagonal maxD = MAX3(ml.w, mc.w, bc.w); minL = MIN3(tc.w, tr.w, mr.w); if (minL > maxD) { getAVerage(mc, tc, tr, mr, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } maxD = MAX3(tc.w, mc.w, mr.w); minL = MIN3(ml.w, bl.w, bc.w); if (minL > maxD) { getAVerage(mc, ml, bl, bc, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } //left and right maxD = MAX3(tl.w, ml.w, bl.w); minL = MIN3(tr.w, mr.w, br.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tr, mr, br, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } maxD = MAX3(tr.w, mr.w, br.w); minL = MIN3(tl.w, ml.w, bl.w); if (minL > mc.w && mc.w > maxD) { getAVerage(mc, tl, ml, bl, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } //diagonal maxD = MAX3(tc.w, mc.w, ml.w); minL = MIN3(mr.w, br.w, bc.w); if (minL > maxD) { getAVerage(mc, mr, br, bc, strength); surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); return; } maxD = MAX3(bc.w, mc.w, mr.w); minL = MIN3(ml.w, tl.w, tc.w); if (minL > maxD) { getAVerage(mc, ml, tl, tc, strength); surf2Dwrite(mc, dstImg, 
sizeof(mc) * x, y, cudaBoundaryModeZero); return; } mc.w = 0.299f * mc.z + 0.587f * mc.y + 0.114f * mc.x + 0.5f; surf2Dwrite(mc, dstImg, sizeof(mc) * x, y, cudaBoundaryModeZero); } void cuRunKernelAnime4K09(const unsigned char* inputData, unsigned char* outputData, ACCudaParamAnime4K09 * param) { cudaError_t err = cudaSuccess; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar4>(); cudaArray_t cuArray0; err = cudaMallocArray(&cuArray0, &channelDesc, param->orgW, param->orgH); CheckCudaErr(err); cudaArray_t cuArray1; err = cudaMallocArray(&cuArray1, &channelDesc, param->W, param->H, cudaArraySurfaceLoadStore); CheckCudaErr(err); cudaArray_t cuArray2; err = cudaMallocArray(&cuArray2, &channelDesc, param->W, param->H, cudaArraySurfaceLoadStore); CheckCudaErr(err); cudaArray_t cuArray3; err = cudaMallocArray(&cuArray3, &channelDesc, param->W, param->H, cudaArraySurfaceLoadStore); CheckCudaErr(err); struct cudaResourceDesc resDesc; struct cudaTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeBorder; texDesc.addressMode[1] = cudaAddressModeBorder; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeNormalizedFloat; texDesc.normalizedCoords = 1; resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = cuArray0; cudaTextureObject_t tex = 0; err = cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); CheckCudaErr(err); resDesc.res.array.array = cuArray1; cudaSurfaceObject_t surf1 = 0; err = cudaCreateSurfaceObject(&surf1, &resDesc); CheckCudaErr(err); resDesc.res.array.array = cuArray2; cudaSurfaceObject_t surf2 = 0; err = cudaCreateSurfaceObject(&surf2, &resDesc); CheckCudaErr(err); resDesc.res.array.array = cuArray3; cudaSurfaceObject_t surf3 = 0; err = cudaCreateSurfaceObject(&surf3, &resDesc); CheckCudaErr(err); err = cudaMemcpy2DToArray(cuArray0, 0, 0, inputData, sizeof(uchar4) * param->orgW, sizeof(uchar4) * param->orgW, param->orgH, 
cudaMemcpyHostToDevice); CheckCudaErr(err); dim3 dimBlock(16, 16); dim3 dimGrid( (param->W + dimBlock.x - 1) / dimBlock.x, (param->H + dimBlock.y - 1) / dimBlock.y ); { int i; getGray <<<dimGrid, dimBlock>>> (tex, surf1, param->W, param->H); for (i = 0; i < param->passes && i < param->pushColorCount; i++) { pushColor <<<dimGrid, dimBlock>>> (surf1, surf2, param->W, param->H, param->strengthColor); getGradient <<<dimGrid, dimBlock>>> (surf2, surf3, param->W, param->H); pushGradient <<<dimGrid, dimBlock>>> (surf3, surf1, param->W, param->H, param->strengthGradient); } while (i++ < param->passes) { getGradient <<<dimGrid, dimBlock>>> (surf1, surf2, param->W, param->H); pushGradient <<<dimGrid, dimBlock>>> (surf2, surf1, param->W, param->H, param->strengthGradient); } } err = cudaMemcpy2DFromArray(outputData, sizeof(uchar4) * param->W, cuArray1, 0, 0, sizeof(uchar4) * param->W, param->H, cudaMemcpyDeviceToHost); CheckCudaErr(err); cudaDestroyTextureObject(tex); cudaDestroySurfaceObject(surf1); cudaDestroySurfaceObject(surf2); cudaDestroySurfaceObject(surf3); cudaFreeArray(cuArray0); cudaFreeArray(cuArray1); cudaFreeArray(cuArray2); cudaFreeArray(cuArray3); }
5b7e582dbd226af6cdea4a5363f89fbdc1455b11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/blob.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/layers/wino_2x2_trans.hpp" namespace caffe { // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, nH, nW) // I = (Batch, H, W, C) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void Winograd2x2ImTransCompute(const T *Input, T *Output, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // w int by = blockIdx.y; // h int bz = blockIdx.z; // b int t = threadIdx.x; // c int nW = (W + 1 + 2 * pad_w - 4) / 2 + 1; int nH = (H + 1 + 2 * pad_h - 4) / 2 + 1; int f_b = bz; int xBase = 2 * bx - pad_w; int yBase = 2 * by - pad_h; // T input_patch_1 [16] = {0}; T input_patch_0; T input_patch_1; T input_patch_2; T input_patch_3; T input_patch_4; T input_patch_5; T input_patch_6; T input_patch_7; T input_patch_8; T input_patch_9; T input_patch_10; T input_patch_11; T input_patch_12; T input_patch_13; T input_patch_14; T input_patch_15; // load (4, 4, 1) patch of input from global memory int f_x, f_y; f_x = xBase + 0; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_0 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_0 = 0; f_x = xBase + 1; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_1 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_1 = 0; f_x = xBase + 2; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_2 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_2 = 0; f_x = xBase + 3; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_3 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_3 = 0; f_x = xBase + 0; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_4 = Input [ f_b * 
H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_4 = 0; f_x = xBase + 1; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_5 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_5 = 0; f_x = xBase + 2; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_6 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_6 = 0; f_x = xBase + 3; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_7 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_7 = 0; f_x = xBase + 0; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_8 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_8 = 0; f_x = xBase + 1; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_9 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_9 = 0; f_x = xBase + 2; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_10 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_10 = 0; f_x = xBase + 3; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_11 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_11 = 0; f_x = xBase + 0; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_12 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_12 = 0; f_x = xBase + 1; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_13 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_13 = 0; f_x = xBase + 2; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_14 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_14 = 0; f_x = xBase + 3; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < 
H)) input_patch_15 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_15 = 0; T trans_input_patch_0; T trans_input_patch_1; T trans_input_patch_2; T trans_input_patch_3; T trans_input_patch_4; T trans_input_patch_5; T trans_input_patch_6; T trans_input_patch_7; T trans_input_patch_8; T trans_input_patch_9; T trans_input_patch_10; T trans_input_patch_11; T trans_input_patch_12; T trans_input_patch_13; T trans_input_patch_14; T trans_input_patch_15; // Winograd Transform trans_input_patch_0 = input_patch_0 - input_patch_2 - input_patch_8 + input_patch_10; trans_input_patch_1 = input_patch_1 + input_patch_2 - input_patch_9 - input_patch_10; trans_input_patch_2 = input_patch_2 - input_patch_1 + input_patch_9 - input_patch_10; trans_input_patch_3 = input_patch_1 - input_patch_3 - input_patch_9 + input_patch_11; trans_input_patch_4 = input_patch_4 - input_patch_6 + input_patch_8 - input_patch_10; trans_input_patch_5 = input_patch_5 + input_patch_6 + input_patch_9 + input_patch_10; trans_input_patch_6 = input_patch_6 - input_patch_5 - input_patch_9 + input_patch_10; trans_input_patch_7 = input_patch_5 - input_patch_7 + input_patch_9 - input_patch_11; trans_input_patch_8 = input_patch_6 - input_patch_4 + input_patch_8 - input_patch_10; trans_input_patch_9 = input_patch_9 - input_patch_6 - input_patch_5 + input_patch_10; trans_input_patch_10 = input_patch_5 - input_patch_6 - input_patch_9 + input_patch_10; trans_input_patch_11 = input_patch_7 - input_patch_5 + input_patch_9 - input_patch_11; trans_input_patch_12 = input_patch_4 - input_patch_6 - input_patch_12 + input_patch_14; trans_input_patch_13 = input_patch_5 + input_patch_6 - input_patch_13 - input_patch_14; trans_input_patch_14 = input_patch_6 - input_patch_5 + input_patch_13 - input_patch_14; trans_input_patch_15 = input_patch_5 - input_patch_7 - input_patch_13 + input_patch_15; int offset = f_b * nH * nW * C + (by * nW + bx) * C + t; int stride = B * nH * nW * C; Output [ 0 * stride + offset 
] = trans_input_patch_0; Output [ 1 * stride + offset ] = trans_input_patch_1; Output [ 2 * stride + offset ] = trans_input_patch_2; Output [ 3 * stride + offset ] = trans_input_patch_3; Output [ 4 * stride + offset ] = trans_input_patch_4; Output [ 5 * stride + offset ] = trans_input_patch_5; Output [ 6 * stride + offset ] = trans_input_patch_6; Output [ 7 * stride + offset ] = trans_input_patch_7; Output [ 8 * stride + offset ] = trans_input_patch_8; Output [ 9 * stride + offset ] = trans_input_patch_9; Output [ 10* stride + offset ] = trans_input_patch_10; Output [ 11* stride + offset ] = trans_input_patch_11; Output [ 12* stride + offset ] = trans_input_patch_12; Output [ 13* stride + offset ] = trans_input_patch_13; Output [ 14* stride + offset ] = trans_input_patch_14; Output [ 15* stride + offset ] = trans_input_patch_15; } void Winograd2x2ImTransComputeLauncher(const float *Input, float *TransIm, int C, int B, int H, int W, int pad_h, int pad_w) { int n_patch_width = (W + 1 + 2 * pad_w - 4) / 2 + 1; int n_patch_height = (H + 1 + 2 * pad_h - 4) / 2 + 1; dim3 blockDim(C, 1, 1); dim3 gridDim(n_patch_width, n_patch_height, B); hipLaunchKernelGGL(( Winograd2x2ImTransCompute<float>), dim3(gridDim), dim3(blockDim), 0, 0, Input, TransIm, C, B, H, W, pad_h, pad_w); } void WinogradTransform(const float *input, const float *weights, float *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { // kernel_dim_; int nW = (W + 1) / 2; int nH = (H + 1) / 2; float *wTransInput; hipMalloc((void **)&wTransInput, 16* B* nH * nW * C* sizeof(float)); hipMemset(wTransInput,0, 16* B* nH * nW * C* sizeof(float)); Winograd2x2ImTransComputeLauncher(input, wTransInput, C, B, H, W,1,1); } void WinogradTransform(const double *input, const double *weights, double *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::compute_output_shape() { const int *kernel_shape_data = this->kernel_shape_.gpu_data(); const 
int *stride_data = this->stride_.gpu_data(); const int *pad_data = this->pad_.gpu_data(); const int *dilation_data = this->dilation_.gpu_data(); this->output_shape_.clear(); for (int i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis const int input_dim = this->input_shape(i + 1); const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); } } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype *bottom_data = bottom[i]->gpu_data(); Dtype *top_data = top[i]->mutable_gpu_data(); int H,W,pad_h,pad_w,C; this->get_input_height(H); this->get_input_width(W); this->get_pad_height(pad_h); this->get_pad_width(pad_w); this->get_conv_in_channels(C); const int *kernel_shape_data = this->kernel_shape_.cpu_data(); //printf("B: %d \n", this->num_); //printf("C: %d \n", C); //printf("input_h: %d \n", H); //printf("input_w: %d \n", W); //printf("pad_h: %d \n", pad_h); //printf("pad_w: %d \n", pad_w); //printf("K: %d \n", kernel_shape_data[i]); WinogradTransform(bottom_data, weight, top_data, this->num_,H,W,pad_h,pad_w,C,kernel_shape_data[i]); } } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { } void WinogradGradientTransform(const float *input, const float *weights, float *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { // kernel_dim_; int nW = (W + 1) / 2; int nH = (H + 1) / 2; float *wTransInput; hipMalloc((void **)&wTransInput, 16* B* nH * nW * C* sizeof(float)); hipMemset(wTransInput,0, 16* B* nH * nW * C* sizeof(float)); Winograd2x2ImTransComputeLauncher(input, 
wTransInput, C, B, H, W,1,1); } void WinogradGradientTransform(const double *input, const double *weights, double *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { } // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, nH, nW) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void OutputGradTransform(float *Output_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // nw int by = blockIdx.y; // nh int bz = blockIdx.z; // b int tx = threadIdx.x; // c int nH = (H + 1) / 2; int nW = (W + 1) / 2; int offset_1 = bz * nH * nW * C + (by * nW + bx) * C + tx; int stride_1 = B * nH * nW * C; T trans_input_grad_patch_0 = Output_grad [ 0 * stride_1 + offset_1 ]; T trans_input_grad_patch_1 = Output_grad [ 1 * stride_1 + offset_1 ]; T trans_input_grad_patch_2 = Output_grad [ 2 * stride_1 + offset_1 ]; T trans_input_grad_patch_3 = Output_grad [ 3 * stride_1 + offset_1 ]; T trans_input_grad_patch_4 = Output_grad [ 4 * stride_1 + offset_1 ]; T trans_input_grad_patch_5 = Output_grad [ 5 * stride_1 + offset_1 ]; T trans_input_grad_patch_6 = Output_grad [ 6 * stride_1 + offset_1 ]; T trans_input_grad_patch_7 = Output_grad [ 7 * stride_1 + offset_1 ]; T trans_input_grad_patch_8 = Output_grad [ 8 * stride_1 + offset_1 ]; T trans_input_grad_patch_9 = Output_grad [ 9 * stride_1 + offset_1 ]; T trans_input_grad_patch_10= Output_grad [ 10* stride_1 + offset_1 ]; T trans_input_grad_patch_11= Output_grad [ 11* stride_1 + offset_1 ]; T trans_input_grad_patch_12= Output_grad [ 12* stride_1 + offset_1 ]; T trans_input_grad_patch_13= Output_grad [ 13* stride_1 + offset_1 ]; T trans_input_grad_patch_14= Output_grad [ 14* stride_1 + offset_1 ]; T trans_input_grad_patch_15= Output_grad [ 15* stride_1 + offset_1 ]; T input_grad_patch_0 = trans_input_grad_patch_0; T input_grad_patch_1 = trans_input_grad_patch_1 - trans_input_grad_patch_2 + trans_input_grad_patch_3; T input_grad_patch_2 = trans_input_grad_patch_1 - trans_input_grad_patch_0 + 
trans_input_grad_patch_2; T input_grad_patch_3 =-trans_input_grad_patch_3; T input_grad_patch_4 = trans_input_grad_patch_4 - trans_input_grad_patch_8 + trans_input_grad_patch_12; T input_grad_patch_5 = trans_input_grad_patch_5 - trans_input_grad_patch_6 + trans_input_grad_patch_7 - trans_input_grad_patch_9 + trans_input_grad_patch_10 - trans_input_grad_patch_11 + trans_input_grad_patch_13 - trans_input_grad_patch_14 + trans_input_grad_patch_15; T input_grad_patch_6 = trans_input_grad_patch_5 - trans_input_grad_patch_4 + trans_input_grad_patch_6 + trans_input_grad_patch_8 - trans_input_grad_patch_9 - trans_input_grad_patch_10 - trans_input_grad_patch_12 + trans_input_grad_patch_13 + trans_input_grad_patch_14; T input_grad_patch_7 = trans_input_grad_patch_11 - trans_input_grad_patch_7 - trans_input_grad_patch_15; T input_grad_patch_8 = trans_input_grad_patch_4 - trans_input_grad_patch_0 + trans_input_grad_patch_8; T input_grad_patch_9 = trans_input_grad_patch_2 - trans_input_grad_patch_1 - trans_input_grad_patch_3 + trans_input_grad_patch_5 - trans_input_grad_patch_6 + trans_input_grad_patch_7 + trans_input_grad_patch_9 - trans_input_grad_patch_10 + trans_input_grad_patch_11; T input_grad_patch_10= trans_input_grad_patch_0 - trans_input_grad_patch_1 - trans_input_grad_patch_2 - trans_input_grad_patch_4 + trans_input_grad_patch_5 + trans_input_grad_patch_6 - trans_input_grad_patch_8 + trans_input_grad_patch_9 + trans_input_grad_patch_10; T input_grad_patch_11= trans_input_grad_patch_3 - trans_input_grad_patch_7 - trans_input_grad_patch_11; T input_grad_patch_12=-trans_input_grad_patch_12; T input_grad_patch_13= trans_input_grad_patch_14 - trans_input_grad_patch_13 - trans_input_grad_patch_15; T input_grad_patch_14= trans_input_grad_patch_12 - trans_input_grad_patch_13 - trans_input_grad_patch_14; T input_grad_patch_15= trans_input_grad_patch_15; __syncthreads(); Output_grad [ 0 * stride_1 + offset_1 ] = input_grad_patch_0; Output_grad [ 1 * stride_1 + offset_1 ] = 
input_grad_patch_1; Output_grad [ 2 * stride_1 + offset_1 ] = input_grad_patch_2; Output_grad [ 3 * stride_1 + offset_1 ] = input_grad_patch_3; Output_grad [ 4 * stride_1 + offset_1 ] = input_grad_patch_4; Output_grad [ 5 * stride_1 + offset_1 ] = input_grad_patch_5; Output_grad [ 6 * stride_1 + offset_1 ] = input_grad_patch_6; Output_grad [ 7 * stride_1 + offset_1 ] = input_grad_patch_7; Output_grad [ 8 * stride_1 + offset_1 ] = input_grad_patch_8; Output_grad [ 9 * stride_1 + offset_1 ] = input_grad_patch_9; Output_grad [ 10* stride_1 + offset_1 ] = input_grad_patch_10; Output_grad [ 11* stride_1 + offset_1 ] = input_grad_patch_11; Output_grad [ 12* stride_1 + offset_1 ] = input_grad_patch_12; Output_grad [ 13* stride_1 + offset_1 ] = input_grad_patch_13; Output_grad [ 14* stride_1 + offset_1 ] = input_grad_patch_14; Output_grad [ 15* stride_1 + offset_1 ] = input_grad_patch_15; } // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, H, W) // I = (Batch, H, W, C) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void Winograd2x2ImTransGradCompute(const float *Output_grad, float *Input_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // w int by = blockIdx.y; // h int bz = blockIdx.z; // b int tx = threadIdx.x; // c int nH = (H + 1) / 2; int nW = (W + 1) / 2; int w_eff = bx + pad_w; int h_eff = by + pad_h; int w_col_start = (w_eff < 4) ? 0 : (w_eff - 4) / 2 + 1; int w_col_end = min(w_eff / 2 + 1, nW); int h_col_start = (h_eff < 4) ? 
0 : (h_eff - 4) / 2 + 1; int h_col_end = min(h_eff / 2 + 1, nH); T val = 0; int offset = bz * nH * nW * C + tx; int stride = B * nH * nW * C; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int w_offset = w_eff - w_col * 2; // within 16 int h_offset = h_eff - h_col * 2; // within 16 val += Output_grad [offset + (h_offset * 4 + w_offset) * stride + (h_col * nW + w_col) * C]; } } Input_grad[bz * H * W * C + by * W * C + bx * C + tx] = val; } void Winograd2x2ImTransGradComputeLauncher(const float *Output_grad, float *Input_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int n_patch_width = (W + 1 + 2 * pad_w - 4) / 2 + 1; int n_patch_height = (H + 1 + 2 * pad_h - 4) / 2 + 1; // hipMemset(Input_grad, 0, sizeof(float) * B * C * H * W); hipLaunchKernelGGL(( OutputGradTransform<float>), dim3(dim3(n_patch_width, n_patch_height, B)), dim3(dim3(C, 1, 1)), 0, 0, (float*)Output_grad, C, B, H, W, pad_h, pad_w); // dim3 blockDim1(C, 1, 1); // dim3 gridDim1(n_patch_height, n_patch_width, B); hipLaunchKernelGGL(( Winograd2x2ImTransGradCompute<float>), dim3(dim3(W, H, B)), dim3(dim3(C, 1, 1)), 0, 0, Output_grad, Input_grad, C, B, H, W, pad_h, pad_w); } INSTANTIATE_LAYER_GPU_FUNCS(Winograd2x2TransLayer); } // namespace caffe
5b7e582dbd226af6cdea4a5363f89fbdc1455b11.cu
#include <vector> #include "caffe/blob.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/layers/wino_2x2_trans.hpp" namespace caffe { // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, nH, nW) // I = (Batch, H, W, C) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void Winograd2x2ImTransCompute(const T *Input, T *Output, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // w int by = blockIdx.y; // h int bz = blockIdx.z; // b int t = threadIdx.x; // c int nW = (W + 1 + 2 * pad_w - 4) / 2 + 1; int nH = (H + 1 + 2 * pad_h - 4) / 2 + 1; int f_b = bz; int xBase = 2 * bx - pad_w; int yBase = 2 * by - pad_h; // T input_patch_1 [16] = {0}; T input_patch_0; T input_patch_1; T input_patch_2; T input_patch_3; T input_patch_4; T input_patch_5; T input_patch_6; T input_patch_7; T input_patch_8; T input_patch_9; T input_patch_10; T input_patch_11; T input_patch_12; T input_patch_13; T input_patch_14; T input_patch_15; // load (4, 4, 1) patch of input from global memory int f_x, f_y; f_x = xBase + 0; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_0 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_0 = 0; f_x = xBase + 1; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_1 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_1 = 0; f_x = xBase + 2; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_2 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_2 = 0; f_x = xBase + 3; f_y = yBase + 0; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_3 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_3 = 0; f_x = xBase + 0; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_4 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_4 = 0; f_x = xBase + 1; f_y = 
yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_5 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_5 = 0; f_x = xBase + 2; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_6 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_6 = 0; f_x = xBase + 3; f_y = yBase + 1; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_7 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_7 = 0; f_x = xBase + 0; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_8 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_8 = 0; f_x = xBase + 1; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_9 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_9 = 0; f_x = xBase + 2; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_10 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_10 = 0; f_x = xBase + 3; f_y = yBase + 2; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_11 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_11 = 0; f_x = xBase + 0; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_12 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_12 = 0; f_x = xBase + 1; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_13 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_13 = 0; f_x = xBase + 2; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_14 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else input_patch_14 = 0; f_x = xBase + 3; f_y = yBase + 3; if((f_x > -1) && (f_x < W) && (f_y > -1) && (f_y < H)) input_patch_15 = Input [ f_b * H * W * C + f_y * W * C + f_x * C + t ]; else 
input_patch_15 = 0; T trans_input_patch_0; T trans_input_patch_1; T trans_input_patch_2; T trans_input_patch_3; T trans_input_patch_4; T trans_input_patch_5; T trans_input_patch_6; T trans_input_patch_7; T trans_input_patch_8; T trans_input_patch_9; T trans_input_patch_10; T trans_input_patch_11; T trans_input_patch_12; T trans_input_patch_13; T trans_input_patch_14; T trans_input_patch_15; // Winograd Transform trans_input_patch_0 = input_patch_0 - input_patch_2 - input_patch_8 + input_patch_10; trans_input_patch_1 = input_patch_1 + input_patch_2 - input_patch_9 - input_patch_10; trans_input_patch_2 = input_patch_2 - input_patch_1 + input_patch_9 - input_patch_10; trans_input_patch_3 = input_patch_1 - input_patch_3 - input_patch_9 + input_patch_11; trans_input_patch_4 = input_patch_4 - input_patch_6 + input_patch_8 - input_patch_10; trans_input_patch_5 = input_patch_5 + input_patch_6 + input_patch_9 + input_patch_10; trans_input_patch_6 = input_patch_6 - input_patch_5 - input_patch_9 + input_patch_10; trans_input_patch_7 = input_patch_5 - input_patch_7 + input_patch_9 - input_patch_11; trans_input_patch_8 = input_patch_6 - input_patch_4 + input_patch_8 - input_patch_10; trans_input_patch_9 = input_patch_9 - input_patch_6 - input_patch_5 + input_patch_10; trans_input_patch_10 = input_patch_5 - input_patch_6 - input_patch_9 + input_patch_10; trans_input_patch_11 = input_patch_7 - input_patch_5 + input_patch_9 - input_patch_11; trans_input_patch_12 = input_patch_4 - input_patch_6 - input_patch_12 + input_patch_14; trans_input_patch_13 = input_patch_5 + input_patch_6 - input_patch_13 - input_patch_14; trans_input_patch_14 = input_patch_6 - input_patch_5 + input_patch_13 - input_patch_14; trans_input_patch_15 = input_patch_5 - input_patch_7 - input_patch_13 + input_patch_15; int offset = f_b * nH * nW * C + (by * nW + bx) * C + t; int stride = B * nH * nW * C; Output [ 0 * stride + offset ] = trans_input_patch_0; Output [ 1 * stride + offset ] = trans_input_patch_1; 
Output [ 2 * stride + offset ] = trans_input_patch_2; Output [ 3 * stride + offset ] = trans_input_patch_3; Output [ 4 * stride + offset ] = trans_input_patch_4; Output [ 5 * stride + offset ] = trans_input_patch_5; Output [ 6 * stride + offset ] = trans_input_patch_6; Output [ 7 * stride + offset ] = trans_input_patch_7; Output [ 8 * stride + offset ] = trans_input_patch_8; Output [ 9 * stride + offset ] = trans_input_patch_9; Output [ 10* stride + offset ] = trans_input_patch_10; Output [ 11* stride + offset ] = trans_input_patch_11; Output [ 12* stride + offset ] = trans_input_patch_12; Output [ 13* stride + offset ] = trans_input_patch_13; Output [ 14* stride + offset ] = trans_input_patch_14; Output [ 15* stride + offset ] = trans_input_patch_15; } void Winograd2x2ImTransComputeLauncher(const float *Input, float *TransIm, int C, int B, int H, int W, int pad_h, int pad_w) { int n_patch_width = (W + 1 + 2 * pad_w - 4) / 2 + 1; int n_patch_height = (H + 1 + 2 * pad_h - 4) / 2 + 1; dim3 blockDim(C, 1, 1); dim3 gridDim(n_patch_width, n_patch_height, B); Winograd2x2ImTransCompute<float><<<gridDim, blockDim>>>(Input, TransIm, C, B, H, W, pad_h, pad_w); } void WinogradTransform(const float *input, const float *weights, float *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { // kernel_dim_; int nW = (W + 1) / 2; int nH = (H + 1) / 2; float *wTransInput; cudaMalloc((void **)&wTransInput, 16* B* nH * nW * C* sizeof(float)); cudaMemset(wTransInput,0, 16* B* nH * nW * C* sizeof(float)); Winograd2x2ImTransComputeLauncher(input, wTransInput, C, B, H, W,1,1); } void WinogradTransform(const double *input, const double *weights, double *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::compute_output_shape() { const int *kernel_shape_data = this->kernel_shape_.gpu_data(); const int *stride_data = this->stride_.gpu_data(); const int *pad_data = this->pad_.gpu_data(); const int *dilation_data 
= this->dilation_.gpu_data(); this->output_shape_.clear(); for (int i = 0; i < this->num_spatial_axes_; ++i) { // i + 1 to skip channel axis const int input_dim = this->input_shape(i + 1); const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1; const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent) / stride_data[i] + 1; this->output_shape_.push_back(output_dim); } } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype *bottom_data = bottom[i]->gpu_data(); Dtype *top_data = top[i]->mutable_gpu_data(); int H,W,pad_h,pad_w,C; this->get_input_height(H); this->get_input_width(W); this->get_pad_height(pad_h); this->get_pad_width(pad_w); this->get_conv_in_channels(C); const int *kernel_shape_data = this->kernel_shape_.cpu_data(); //printf("B: %d \n", this->num_); //printf("C: %d \n", C); //printf("input_h: %d \n", H); //printf("input_w: %d \n", W); //printf("pad_h: %d \n", pad_h); //printf("pad_w: %d \n", pad_w); //printf("K: %d \n", kernel_shape_data[i]); WinogradTransform(bottom_data, weight, top_data, this->num_,H,W,pad_h,pad_w,C,kernel_shape_data[i]); } } template<typename Dtype> void Winograd2x2TransLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { } void WinogradGradientTransform(const float *input, const float *weights, float *output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { // kernel_dim_; int nW = (W + 1) / 2; int nH = (H + 1) / 2; float *wTransInput; cudaMalloc((void **)&wTransInput, 16* B* nH * nW * C* sizeof(float)); cudaMemset(wTransInput,0, 16* B* nH * nW * C* sizeof(float)); Winograd2x2ImTransComputeLauncher(input, wTransInput, C, B, H, W,1,1); } void WinogradGradientTransform(const double *input, const double *weights, double 
*output, int B,int H,int W,int pad_h,int pad_w, int C, int K) { } // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, nH, nW) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void OutputGradTransform(float *Output_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // nw int by = blockIdx.y; // nh int bz = blockIdx.z; // b int tx = threadIdx.x; // c int nH = (H + 1) / 2; int nW = (W + 1) / 2; int offset_1 = bz * nH * nW * C + (by * nW + bx) * C + tx; int stride_1 = B * nH * nW * C; T trans_input_grad_patch_0 = Output_grad [ 0 * stride_1 + offset_1 ]; T trans_input_grad_patch_1 = Output_grad [ 1 * stride_1 + offset_1 ]; T trans_input_grad_patch_2 = Output_grad [ 2 * stride_1 + offset_1 ]; T trans_input_grad_patch_3 = Output_grad [ 3 * stride_1 + offset_1 ]; T trans_input_grad_patch_4 = Output_grad [ 4 * stride_1 + offset_1 ]; T trans_input_grad_patch_5 = Output_grad [ 5 * stride_1 + offset_1 ]; T trans_input_grad_patch_6 = Output_grad [ 6 * stride_1 + offset_1 ]; T trans_input_grad_patch_7 = Output_grad [ 7 * stride_1 + offset_1 ]; T trans_input_grad_patch_8 = Output_grad [ 8 * stride_1 + offset_1 ]; T trans_input_grad_patch_9 = Output_grad [ 9 * stride_1 + offset_1 ]; T trans_input_grad_patch_10= Output_grad [ 10* stride_1 + offset_1 ]; T trans_input_grad_patch_11= Output_grad [ 11* stride_1 + offset_1 ]; T trans_input_grad_patch_12= Output_grad [ 12* stride_1 + offset_1 ]; T trans_input_grad_patch_13= Output_grad [ 13* stride_1 + offset_1 ]; T trans_input_grad_patch_14= Output_grad [ 14* stride_1 + offset_1 ]; T trans_input_grad_patch_15= Output_grad [ 15* stride_1 + offset_1 ]; T input_grad_patch_0 = trans_input_grad_patch_0; T input_grad_patch_1 = trans_input_grad_patch_1 - trans_input_grad_patch_2 + trans_input_grad_patch_3; T input_grad_patch_2 = trans_input_grad_patch_1 - trans_input_grad_patch_0 + trans_input_grad_patch_2; T input_grad_patch_3 =-trans_input_grad_patch_3; T input_grad_patch_4 = trans_input_grad_patch_4 
- trans_input_grad_patch_8 + trans_input_grad_patch_12; T input_grad_patch_5 = trans_input_grad_patch_5 - trans_input_grad_patch_6 + trans_input_grad_patch_7 - trans_input_grad_patch_9 + trans_input_grad_patch_10 - trans_input_grad_patch_11 + trans_input_grad_patch_13 - trans_input_grad_patch_14 + trans_input_grad_patch_15; T input_grad_patch_6 = trans_input_grad_patch_5 - trans_input_grad_patch_4 + trans_input_grad_patch_6 + trans_input_grad_patch_8 - trans_input_grad_patch_9 - trans_input_grad_patch_10 - trans_input_grad_patch_12 + trans_input_grad_patch_13 + trans_input_grad_patch_14; T input_grad_patch_7 = trans_input_grad_patch_11 - trans_input_grad_patch_7 - trans_input_grad_patch_15; T input_grad_patch_8 = trans_input_grad_patch_4 - trans_input_grad_patch_0 + trans_input_grad_patch_8; T input_grad_patch_9 = trans_input_grad_patch_2 - trans_input_grad_patch_1 - trans_input_grad_patch_3 + trans_input_grad_patch_5 - trans_input_grad_patch_6 + trans_input_grad_patch_7 + trans_input_grad_patch_9 - trans_input_grad_patch_10 + trans_input_grad_patch_11; T input_grad_patch_10= trans_input_grad_patch_0 - trans_input_grad_patch_1 - trans_input_grad_patch_2 - trans_input_grad_patch_4 + trans_input_grad_patch_5 + trans_input_grad_patch_6 - trans_input_grad_patch_8 + trans_input_grad_patch_9 + trans_input_grad_patch_10; T input_grad_patch_11= trans_input_grad_patch_3 - trans_input_grad_patch_7 - trans_input_grad_patch_11; T input_grad_patch_12=-trans_input_grad_patch_12; T input_grad_patch_13= trans_input_grad_patch_14 - trans_input_grad_patch_13 - trans_input_grad_patch_15; T input_grad_patch_14= trans_input_grad_patch_12 - trans_input_grad_patch_13 - trans_input_grad_patch_14; T input_grad_patch_15= trans_input_grad_patch_15; __syncthreads(); Output_grad [ 0 * stride_1 + offset_1 ] = input_grad_patch_0; Output_grad [ 1 * stride_1 + offset_1 ] = input_grad_patch_1; Output_grad [ 2 * stride_1 + offset_1 ] = input_grad_patch_2; Output_grad [ 3 * stride_1 + offset_1 ] = 
input_grad_patch_3; Output_grad [ 4 * stride_1 + offset_1 ] = input_grad_patch_4; Output_grad [ 5 * stride_1 + offset_1 ] = input_grad_patch_5; Output_grad [ 6 * stride_1 + offset_1 ] = input_grad_patch_6; Output_grad [ 7 * stride_1 + offset_1 ] = input_grad_patch_7; Output_grad [ 8 * stride_1 + offset_1 ] = input_grad_patch_8; Output_grad [ 9 * stride_1 + offset_1 ] = input_grad_patch_9; Output_grad [ 10* stride_1 + offset_1 ] = input_grad_patch_10; Output_grad [ 11* stride_1 + offset_1 ] = input_grad_patch_11; Output_grad [ 12* stride_1 + offset_1 ] = input_grad_patch_12; Output_grad [ 13* stride_1 + offset_1 ] = input_grad_patch_13; Output_grad [ 14* stride_1 + offset_1 ] = input_grad_patch_14; Output_grad [ 15* stride_1 + offset_1 ] = input_grad_patch_15; } // dim3 threadsPerBlock(C) // dim3 numBlocks(Batch, H, W) // I = (Batch, H, W, C) // O = (16, Batch, nH, nW, C) template <typename T> __global__ void Winograd2x2ImTransGradCompute(const float *Output_grad, float *Input_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int bx = blockIdx.x; // w int by = blockIdx.y; // h int bz = blockIdx.z; // b int tx = threadIdx.x; // c int nH = (H + 1) / 2; int nW = (W + 1) / 2; int w_eff = bx + pad_w; int h_eff = by + pad_h; int w_col_start = (w_eff < 4) ? 0 : (w_eff - 4) / 2 + 1; int w_col_end = min(w_eff / 2 + 1, nW); int h_col_start = (h_eff < 4) ? 
0 : (h_eff - 4) / 2 + 1; int h_col_end = min(h_eff / 2 + 1, nH); T val = 0; int offset = bz * nH * nW * C + tx; int stride = B * nH * nW * C; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int w_offset = w_eff - w_col * 2; // within 16 int h_offset = h_eff - h_col * 2; // within 16 val += Output_grad [offset + (h_offset * 4 + w_offset) * stride + (h_col * nW + w_col) * C]; } } Input_grad[bz * H * W * C + by * W * C + bx * C + tx] = val; } void Winograd2x2ImTransGradComputeLauncher(const float *Output_grad, float *Input_grad, int C, int B, int H, int W, int pad_h, int pad_w) { int n_patch_width = (W + 1 + 2 * pad_w - 4) / 2 + 1; int n_patch_height = (H + 1 + 2 * pad_h - 4) / 2 + 1; // cudaMemset(Input_grad, 0, sizeof(float) * B * C * H * W); OutputGradTransform<float><<<dim3(n_patch_width, n_patch_height, B), dim3(C, 1, 1)>>>((float*)Output_grad, C, B, H, W, pad_h, pad_w); // dim3 blockDim1(C, 1, 1); // dim3 gridDim1(n_patch_height, n_patch_width, B); Winograd2x2ImTransGradCompute<float><<<dim3(W, H, B), dim3(C, 1, 1)>>>(Output_grad, Input_grad, C, B, H, W, pad_h, pad_w); } INSTANTIATE_LAYER_GPU_FUNCS(Winograd2x2TransLayer); } // namespace caffe
df7ae67cc6fd4f6f2e71c4b75279e23b822b4757.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> __global__ void fun(int *y){ int a = 5; *(&a+1); printf("%d\n", *(&a+1)); } int main(void) { int y; int *dev_y; hipMalloc((void**)&dev_y, sizeof(int)); hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_y); hipMemcpy(&y, dev_y, sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_y); return 0; } // 5
df7ae67cc6fd4f6f2e71c4b75279e23b822b4757.cu
#include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> __global__ void fun(int *y){ int a = 5; *(&a+1); printf("%d\n", *(&a+1)); } int main(void) { int y; int *dev_y; cudaMalloc((void**)&dev_y, sizeof(int)); fun<<<1,1>>>(dev_y); cudaMemcpy(&y, dev_y, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_y); return 0; } //编译通过 5;
706efb2be0b9eb0831700224cb733b5a0dbff870.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <iostream> #include <device_launch_parameters.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. 
*/ __global__ void exclusive_scan_add( const unsigned int * const values, unsigned int * const output, unsigned int size ) { extern __shared__ unsigned int exclusive_scan_smem[]; int id = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if ( id < size ) exclusive_scan_smem[tid] = values[id]; else exclusive_scan_smem[tid] = 0; __syncthreads(); //Reduce for ( unsigned int s = 1; s < blockDim.x; s <<= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) exclusive_scan_smem[tid] = exclusive_scan_smem[tid] + exclusive_scan_smem[tid-s]; __syncthreads(); } if (tid == (blockDim.x - 1)) exclusive_scan_smem[tid] = 0; __syncthreads(); //Downsweep for ( unsigned int s = blockDim.x; s >= 1; s >>= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { unsigned int tmp = exclusive_scan_smem[tid]; exclusive_scan_smem[tid] = exclusive_scan_smem[tid] + exclusive_scan_smem[tid - s]; exclusive_scan_smem[tid-s] = tmp; } __syncthreads(); } if ( tid < size ) output[tid] = exclusive_scan_smem[tid] + values[tid]; } __global__ void exclusive_scan_add_reduce( const unsigned int * const values, unsigned int *output, unsigned int *output_reduced, unsigned int size) { extern __shared__ unsigned int smem[]; int tid = threadIdx.x; int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < size ) smem[tid] = values[id]; else smem[tid] = 0; __syncthreads(); //Reduce for ( unsigned int s = 1; s < blockDim.x; s <<= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { smem[tid] = smem[tid] + smem[tid - s]; } __syncthreads(); } if (tid == blockDim.x - 1) { smem[tid] = 0; } __syncthreads(); //Downsweep for ( unsigned int s = blockDim.x; s >= 1; s >>= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { unsigned int tmp = smem[tid]; smem[tid] = smem[tid] + smem[tid - s]; smem[tid - s] = tmp; } __syncthreads(); } if ( id < size ) output[id] = smem[tid]; if ( tid == blockDim.x - 1 ) { if ( id < size ) { output_reduced[blockIdx.x] = smem[tid] + values[id]; } else { output_reduced[blockIdx.x] 
= smem[tid]; } } } __global__ void exclusive_scan_add_merge( unsigned int * scan, unsigned int * reduce, unsigned int size ) { if ( blockIdx.x > 0 ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < size ) scan[tid] = scan[tid] + reduce[blockIdx.x - 1]; } } __global__ void sort_update_position( unsigned int *predicate, unsigned int *address, unsigned int *input_values, unsigned int *input_pos, unsigned int *output_values, unsigned int *output_pos, unsigned int *offset, unsigned int num_items, bool reset_offset ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_items ) { if ( predicate[tid] == 1 ) { if ( !reset_offset ) { output_values[address[tid]] = input_values[tid]; output_pos[address[tid]] = input_pos[tid]; } else { output_values[address[tid] + *offset] = input_values[tid]; output_pos[address[tid] + *offset] = input_pos[tid]; } } } __syncthreads(); if (tid == num_items - 1 ) { if ( !reset_offset ) { *offset = address[tid] + predicate[tid]; } else *offset = 0; } } __global__ void predicate_zero_or_one_at_position( bool is_one, unsigned int position, unsigned int *const values, unsigned int *output_predicate, unsigned int num_items) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if ( tid < num_items ) { output_predicate[tid] = ((is_one && (values[tid] & (1 << position))) || (!is_one && !(values[tid] & (1 << position)))) ? 
1 : 0; } __syncthreads(); } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { size_t num_threads = 1024; size_t num_blocks = static_cast<size_t>(ceil(static_cast<float>(numElems)/static_cast<float>(num_threads))); unsigned int * d_predicate; checkCudaErrors(hipMalloc(&d_predicate, sizeof(unsigned int) * numElems)); unsigned int * d_reduce; checkCudaErrors(hipMalloc(&d_reduce, sizeof(unsigned int) * num_blocks)); unsigned int * d_scan_reduce; checkCudaErrors(hipMalloc(&d_scan_reduce, sizeof(unsigned int) * num_blocks)); unsigned int * d_scan; checkCudaErrors(hipMalloc(&d_scan, sizeof(unsigned int) * numElems)); unsigned int * d_offset; checkCudaErrors(hipMalloc(&d_offset, sizeof(unsigned int))); bool input_is_input = true; for ( unsigned int bit = 0; bit < sizeof(unsigned int) * 8; ++bit ) { unsigned int *const d_input_values = (input_is_input) ? d_inputVals : d_outputVals; unsigned int *const d_input_pos = (input_is_input) ? d_inputPos : d_outputPos; unsigned int *const d_output_values = (!input_is_input) ? d_inputVals : d_outputVals; unsigned int *const d_output_pos = (!input_is_input) ? 
d_inputPos : d_outputPos; //Bits Zero hipLaunchKernelGGL(( predicate_zero_or_one_at_position), dim3(num_blocks),dim3(num_threads), 0, 0, false, bit, d_input_values, d_predicate, numElems); hipLaunchKernelGGL(( exclusive_scan_add_reduce), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(unsigned int), 0, d_predicate, d_scan, d_reduce, numElems); hipLaunchKernelGGL(( exclusive_scan_add), dim3(1), dim3(num_threads), num_threads* sizeof(unsigned int), 0, d_reduce, d_scan_reduce, num_blocks); hipLaunchKernelGGL(( exclusive_scan_add_merge), dim3(num_blocks), dim3(num_threads), 0, 0, d_scan, d_scan_reduce, numElems); hipLaunchKernelGGL(( sort_update_position), dim3(num_blocks), dim3(num_threads), 0, 0, d_predicate, d_scan, d_input_values, d_input_pos, d_output_values, d_output_pos, d_offset, numElems, false); //Bits One hipLaunchKernelGGL(( predicate_zero_or_one_at_position), dim3(num_blocks),dim3(num_threads), 0, 0, true, bit, d_input_values, d_predicate, numElems); hipLaunchKernelGGL(( exclusive_scan_add_reduce), dim3(num_blocks), dim3(num_threads), num_threads * sizeof(unsigned int), 0, d_predicate, d_scan, d_reduce, numElems); hipLaunchKernelGGL(( exclusive_scan_add), dim3(1), dim3(num_threads), num_threads* sizeof(unsigned int), 0, d_reduce, d_scan_reduce, num_blocks); hipLaunchKernelGGL(( exclusive_scan_add_merge), dim3(num_blocks), dim3(num_threads), 0, 0, d_scan, d_scan_reduce, numElems); hipLaunchKernelGGL(( sort_update_position), dim3(num_blocks), dim3(num_threads), 0, 0, d_predicate, d_scan, d_input_values, d_input_pos, d_output_values, d_output_pos, d_offset, numElems, true); input_is_input = !input_is_input; } if ( input_is_input ) { hipMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice); hipMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice); } checkCudaErrors(hipFree(d_predicate)); checkCudaErrors(hipFree(d_reduce)); checkCudaErrors(hipFree(d_scan_reduce)); 
checkCudaErrors(hipFree(d_scan)); checkCudaErrors(hipFree(d_offset)); }
706efb2be0b9eb0831700224cb733b5a0dbff870.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <iostream> #include <device_launch_parameters.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. 
*/ __global__ void exclusive_scan_add( const unsigned int * const values, unsigned int * const output, unsigned int size ) { extern __shared__ unsigned int exclusive_scan_smem[]; int id = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if ( id < size ) exclusive_scan_smem[tid] = values[id]; else exclusive_scan_smem[tid] = 0; __syncthreads(); //Reduce for ( unsigned int s = 1; s < blockDim.x; s <<= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) exclusive_scan_smem[tid] = exclusive_scan_smem[tid] + exclusive_scan_smem[tid-s]; __syncthreads(); } if (tid == (blockDim.x - 1)) exclusive_scan_smem[tid] = 0; __syncthreads(); //Downsweep for ( unsigned int s = blockDim.x; s >= 1; s >>= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { unsigned int tmp = exclusive_scan_smem[tid]; exclusive_scan_smem[tid] = exclusive_scan_smem[tid] + exclusive_scan_smem[tid - s]; exclusive_scan_smem[tid-s] = tmp; } __syncthreads(); } if ( tid < size ) output[tid] = exclusive_scan_smem[tid] + values[tid]; } __global__ void exclusive_scan_add_reduce( const unsigned int * const values, unsigned int *output, unsigned int *output_reduced, unsigned int size) { extern __shared__ unsigned int smem[]; int tid = threadIdx.x; int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < size ) smem[tid] = values[id]; else smem[tid] = 0; __syncthreads(); //Reduce for ( unsigned int s = 1; s < blockDim.x; s <<= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { smem[tid] = smem[tid] + smem[tid - s]; } __syncthreads(); } if (tid == blockDim.x - 1) { smem[tid] = 0; } __syncthreads(); //Downsweep for ( unsigned int s = blockDim.x; s >= 1; s >>= 1 ) { if ( (tid+1) % s == 0 && ((tid+1)/s) % 2 == 0) { unsigned int tmp = smem[tid]; smem[tid] = smem[tid] + smem[tid - s]; smem[tid - s] = tmp; } __syncthreads(); } if ( id < size ) output[id] = smem[tid]; if ( tid == blockDim.x - 1 ) { if ( id < size ) { output_reduced[blockIdx.x] = smem[tid] + values[id]; } else { output_reduced[blockIdx.x] 
= smem[tid]; } } } __global__ void exclusive_scan_add_merge( unsigned int * scan, unsigned int * reduce, unsigned int size ) { if ( blockIdx.x > 0 ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < size ) scan[tid] = scan[tid] + reduce[blockIdx.x - 1]; } } __global__ void sort_update_position( unsigned int *predicate, unsigned int *address, unsigned int *input_values, unsigned int *input_pos, unsigned int *output_values, unsigned int *output_pos, unsigned int *offset, unsigned int num_items, bool reset_offset ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_items ) { if ( predicate[tid] == 1 ) { if ( !reset_offset ) { output_values[address[tid]] = input_values[tid]; output_pos[address[tid]] = input_pos[tid]; } else { output_values[address[tid] + *offset] = input_values[tid]; output_pos[address[tid] + *offset] = input_pos[tid]; } } } __syncthreads(); if (tid == num_items - 1 ) { if ( !reset_offset ) { *offset = address[tid] + predicate[tid]; } else *offset = 0; } } __global__ void predicate_zero_or_one_at_position( bool is_one, unsigned int position, unsigned int *const values, unsigned int *output_predicate, unsigned int num_items) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if ( tid < num_items ) { output_predicate[tid] = ((is_one && (values[tid] & (1 << position))) || (!is_one && !(values[tid] & (1 << position)))) ? 
1 : 0; } __syncthreads(); } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { size_t num_threads = 1024; size_t num_blocks = static_cast<size_t>(ceil(static_cast<float>(numElems)/static_cast<float>(num_threads))); unsigned int * d_predicate; checkCudaErrors(cudaMalloc(&d_predicate, sizeof(unsigned int) * numElems)); unsigned int * d_reduce; checkCudaErrors(cudaMalloc(&d_reduce, sizeof(unsigned int) * num_blocks)); unsigned int * d_scan_reduce; checkCudaErrors(cudaMalloc(&d_scan_reduce, sizeof(unsigned int) * num_blocks)); unsigned int * d_scan; checkCudaErrors(cudaMalloc(&d_scan, sizeof(unsigned int) * numElems)); unsigned int * d_offset; checkCudaErrors(cudaMalloc(&d_offset, sizeof(unsigned int))); bool input_is_input = true; for ( unsigned int bit = 0; bit < sizeof(unsigned int) * 8; ++bit ) { unsigned int *const d_input_values = (input_is_input) ? d_inputVals : d_outputVals; unsigned int *const d_input_pos = (input_is_input) ? d_inputPos : d_outputPos; unsigned int *const d_output_values = (!input_is_input) ? d_inputVals : d_outputVals; unsigned int *const d_output_pos = (!input_is_input) ? 
d_inputPos : d_outputPos; //Bits Zero predicate_zero_or_one_at_position<<<num_blocks,num_threads>>>(false, bit, d_input_values, d_predicate, numElems); exclusive_scan_add_reduce<<<num_blocks, num_threads, num_threads * sizeof(unsigned int)>>>(d_predicate, d_scan, d_reduce, numElems); exclusive_scan_add<<<1, num_threads, num_threads* sizeof(unsigned int)>>>(d_reduce, d_scan_reduce, num_blocks); exclusive_scan_add_merge<<<num_blocks, num_threads>>>(d_scan, d_scan_reduce, numElems); sort_update_position<<<num_blocks, num_threads>>>(d_predicate, d_scan, d_input_values, d_input_pos, d_output_values, d_output_pos, d_offset, numElems, false); //Bits One predicate_zero_or_one_at_position<<<num_blocks,num_threads>>>(true, bit, d_input_values, d_predicate, numElems); exclusive_scan_add_reduce<<<num_blocks, num_threads, num_threads * sizeof(unsigned int)>>>(d_predicate, d_scan, d_reduce, numElems); exclusive_scan_add<<<1, num_threads, num_threads* sizeof(unsigned int)>>>(d_reduce, d_scan_reduce, num_blocks); exclusive_scan_add_merge<<<num_blocks, num_threads>>>(d_scan, d_scan_reduce, numElems); sort_update_position<<<num_blocks, num_threads>>>(d_predicate, d_scan, d_input_values, d_input_pos, d_output_values, d_output_pos, d_offset, numElems, true); input_is_input = !input_is_input; } if ( input_is_input ) { cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice); cudaMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice); } checkCudaErrors(cudaFree(d_predicate)); checkCudaErrors(cudaFree(d_reduce)); checkCudaErrors(cudaFree(d_scan_reduce)); checkCudaErrors(cudaFree(d_scan)); checkCudaErrors(cudaFree(d_offset)); }
02bfe7dd400f7d6d6bc1c4cde7dcf1c46cd8958b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // This source file is part of BehaveRT // http://isis.dia.unisa.it/projects/behavert/ // // Copyright (c) 2008-2010 ISISLab - University of Salerno // Original author: Bernardino Frola <frola@dia.unisa.it> // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// ---------------- // Change log // // 01-09 bf: Created // // ---------------- #pragma once #include <cutil.h> #include <stdio.h> #include <math.h> #include "cutil_math.h" #include "math_constants.h" #include "common_resources.cu" // Same plugIn dependencies #include "include\drawable3d_kernel.cuh" #include "drawable3d_resources.cu" // Other plugIn dependencies #include "..\EnvGrid3D\include\envgrid3d_kernel.cuh" #include "..\EnvGrid3D\envgrid3d_resources.cu" #include "..\Proximity3D\include\Proximity3D_kernel.cuh" #include "..\Proximity3D\Proximity3D_resources.cu" #include "..\Body\include\body3d_kernel.cuh" #include "..\Body\body3d_resources.cu" #include "..\OpenSteerWrapper\include\OpenSteerWrapper_kernel.cuh" #include "..\OpenSteerWrapper\OpenSteerWrapper_resources.cu" __global__ void extractColorFromNeighborhoodD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; const int neighIndexBase = __mul24(index, dProximity3DParams.numNeighWordsPerAgent); uint4 neighWord = tex1Dfetch(oldNeighListTex, neighIndexBase); //BehaveRT::uint neighNum = neighWord.x; uint2 sortedData = tex1Dfetch(agentHashTex, index); declare_output(color, float4, dDrawable3DFields.color); float colorFactor = ((float)neighWord.x) / dProximity3DParams.maxNeighbors; // Write the result in an ordered fashion float4 colorFinal = make_float4( dDrawable3DParams.colorBase.x + colorFactor , dDrawable3DParams.colorBase.y + 0.3 + 1 - colorFactor, dDrawable3DParams.colorBase.z + colorFactor * (1 - colorFactor), 1); if (dDrawable3DParams.useCUDAGeometry) { color[sortedData.y * dDrawable3DParams.numVertexes] = colorFinal - make_float4(0.5, 0.5, 0.5, 0); color[sortedData.y * dDrawable3DParams.numVertexes + 1] = colorFinal; color[sortedData.y * dDrawable3DParams.numVertexes + 2] = colorFinal; } else color[sortedData.y] = colorFinal; } __global__ void smoothColorD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 targetColor = tex1Dfetch(colorTex, index); float4 oldSmoothedColor 
= tex1Dfetch(smoothedColorTex, index); uint2 sortedData = tex1Dfetch(agentHashTex, index); if (calcDist4(targetColor, oldSmoothedColor) < 0.1) return; declare_output(smoothedColor, float4, dDrawable3DFields.smoothedColor); smoothedColor[index] = oldSmoothedColor + (targetColor - oldSmoothedColor) / 150; declare_output(newTargetColor, float4, dDrawable3DFields.color); newTargetColor[index] = targetColor; } __global__ void createGeometryD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; uint2 sortedData = tex1Dfetch(agentHashTex, index); float4 pos = tex1Dfetch(oldPosTex, index); float4 forward = tex1Dfetch(oldForwardTex, index); declare_output(geometry, float4, dDrawable3DFields.geometry); geometry[sortedData.y * 3] = pos; geometry[sortedData.y * 3 + 1] = make_float4( make_float3(pos) - perpendicularComponent(make_float3(forward) * dBody3DParams.commonRadius * 2 + make_float3(forward) * dBody3DParams.commonRadius, make_float3(0, 1, 0)), pos.w); geometry[sortedData.y * 3 + 2] = make_float4( make_float3(pos) + make_float3(forward) * dBody3DParams.commonRadius * 4, pos.w); } // ---------------------------------------------------- // ---------------------------------------------------- __device__ float distanceFromLine (const float3 point, const float3 lineOrigin, const float3 lineUnitTangent) { const float3 offset = point - lineOrigin; const float3 perp = perpendicularComponent (offset, lineUnitTangent); return length(perp); } // ---------------------------------------------------- // ---------------------------------------------------- __global__ void computeMouseDistance_kernel() { int individualIndex = BehaveRT::getIndividualIndex(); float3 position = make_float3( getInputFeatureCachedElement(oldPosTex, individualIndex)); float distance = distanceFromLine( position, dDrawable3DParams.cameraPosition, dDrawable3DParams.mouseDirection); //uint sortedIndex = FETCH(agentHash, individualIndex).y; // Store the distance into the output array 
BehaveRT::setOutputFeatureElement<float>( dDrawable3DFields.mouseDistance, individualIndex, distance); } // ---------------------------------------------------- // ---------------------------------------------------- __global__ void dummyKernel() { // Do nothing } // ---------------------------------------------------- extern "C" { // //////////////////////////////////////////////////////////////////////// // Global vars // //////////////////////////////////////////////////////////////////////// // Export kernels BehaveRT::genericKernelFuncPointer extractColorFromNeighborhoodDRef() { return &extractColorFromNeighborhoodD; } BehaveRT::genericKernelFuncPointer smoothColorDRef() { return &smoothColorD; } BehaveRT::genericKernelFuncPointer createGeometryDRef() { return &createGeometryD; } BehaveRT::genericKernelFuncPointer dummyKernelRef() { return &dummyKernel; } BehaveRT_exportKernel(computeMouseDistance_kernel); void Drawable3D::Drawable3D_beforeKernelCall() { OpenSteerWrapper::OpenSteerWrapper_beforeKernelCall(); bind_field_texture(hDrawable3DFields.color, colorTex); bind_field_texture(hDrawable3DFields.smoothedColor, smoothedColorTex); } void Drawable3D::Drawable3D_afterKernelCall() { OpenSteerWrapper::OpenSteerWrapper_afterKernelCall(); unbind_field_texture(colorTex); unbind_field_texture(smoothedColorTex); } void Drawable3D::Drawable3D_copyFieldsToDevice() { CUDA_SAFE_CALL( hipMemcpyToSymbol(dDrawable3DFields, &hDrawable3DFields, sizeof(Drawable3DFields)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(dDrawable3DParams, &hDrawable3DParams, sizeof(Drawable3DParams)) ); } }
02bfe7dd400f7d6d6bc1c4cde7dcf1c46cd8958b.cu
// ---------------------------------------------------------------------------- // This source file is part of BehaveRT // http://isis.dia.unisa.it/projects/behavert/ // // Copyright (c) 2008-2010 ISISLab - University of Salerno // Original author: Bernardino Frola <frola@dia.unisa.it> // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// ---------------- // Change log // // 01-09 bf: Created // // ---------------- #pragma once #include <cutil.h> #include <stdio.h> #include <math.h> #include "cutil_math.h" #include "math_constants.h" #include "common_resources.cu" // Same plugIn dependencies #include "include\drawable3d_kernel.cuh" #include "drawable3d_resources.cu" // Other plugIn dependencies #include "..\EnvGrid3D\include\envgrid3d_kernel.cuh" #include "..\EnvGrid3D\envgrid3d_resources.cu" #include "..\Proximity3D\include\Proximity3D_kernel.cuh" #include "..\Proximity3D\Proximity3D_resources.cu" #include "..\Body\include\body3d_kernel.cuh" #include "..\Body\body3d_resources.cu" #include "..\OpenSteerWrapper\include\OpenSteerWrapper_kernel.cuh" #include "..\OpenSteerWrapper\OpenSteerWrapper_resources.cu" __global__ void extractColorFromNeighborhoodD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; const int neighIndexBase = __mul24(index, dProximity3DParams.numNeighWordsPerAgent); uint4 neighWord = tex1Dfetch(oldNeighListTex, neighIndexBase); //BehaveRT::uint neighNum = neighWord.x; uint2 sortedData = tex1Dfetch(agentHashTex, index); declare_output(color, float4, dDrawable3DFields.color); float colorFactor = ((float)neighWord.x) / dProximity3DParams.maxNeighbors; // Write the result in an ordered fashion float4 colorFinal = make_float4( dDrawable3DParams.colorBase.x + colorFactor , dDrawable3DParams.colorBase.y + 0.3 + 1 - colorFactor, dDrawable3DParams.colorBase.z + colorFactor * (1 - colorFactor), 1); if (dDrawable3DParams.useCUDAGeometry) { color[sortedData.y * dDrawable3DParams.numVertexes] = colorFinal - make_float4(0.5, 0.5, 0.5, 0); color[sortedData.y * dDrawable3DParams.numVertexes + 1] = colorFinal; color[sortedData.y * dDrawable3DParams.numVertexes + 2] = colorFinal; } else color[sortedData.y] = colorFinal; } __global__ void smoothColorD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 targetColor = tex1Dfetch(colorTex, index); float4 oldSmoothedColor 
= tex1Dfetch(smoothedColorTex, index); uint2 sortedData = tex1Dfetch(agentHashTex, index); if (calcDist4(targetColor, oldSmoothedColor) < 0.1) return; declare_output(smoothedColor, float4, dDrawable3DFields.smoothedColor); smoothedColor[index] = oldSmoothedColor + (targetColor - oldSmoothedColor) / 150; declare_output(newTargetColor, float4, dDrawable3DFields.color); newTargetColor[index] = targetColor; } __global__ void createGeometryD() { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; uint2 sortedData = tex1Dfetch(agentHashTex, index); float4 pos = tex1Dfetch(oldPosTex, index); float4 forward = tex1Dfetch(oldForwardTex, index); declare_output(geometry, float4, dDrawable3DFields.geometry); geometry[sortedData.y * 3] = pos; geometry[sortedData.y * 3 + 1] = make_float4( make_float3(pos) - perpendicularComponent(make_float3(forward) * dBody3DParams.commonRadius * 2 + make_float3(forward) * dBody3DParams.commonRadius, make_float3(0, 1, 0)), pos.w); geometry[sortedData.y * 3 + 2] = make_float4( make_float3(pos) + make_float3(forward) * dBody3DParams.commonRadius * 4, pos.w); } // ---------------------------------------------------- // ---------------------------------------------------- __device__ float distanceFromLine (const float3 point, const float3 lineOrigin, const float3 lineUnitTangent) { const float3 offset = point - lineOrigin; const float3 perp = perpendicularComponent (offset, lineUnitTangent); return length(perp); } // ---------------------------------------------------- // ---------------------------------------------------- __global__ void computeMouseDistance_kernel() { int individualIndex = BehaveRT::getIndividualIndex(); float3 position = make_float3( getInputFeatureCachedElement(oldPosTex, individualIndex)); float distance = distanceFromLine( position, dDrawable3DParams.cameraPosition, dDrawable3DParams.mouseDirection); //uint sortedIndex = FETCH(agentHash, individualIndex).y; // Store the distance into the output array 
BehaveRT::setOutputFeatureElement<float>( dDrawable3DFields.mouseDistance, individualIndex, distance); } // ---------------------------------------------------- // ---------------------------------------------------- __global__ void dummyKernel() { // Do nothing } // ---------------------------------------------------- extern "C" { // //////////////////////////////////////////////////////////////////////// // Global vars // //////////////////////////////////////////////////////////////////////// // Export kernels BehaveRT::genericKernelFuncPointer extractColorFromNeighborhoodDRef() { return &extractColorFromNeighborhoodD; } BehaveRT::genericKernelFuncPointer smoothColorDRef() { return &smoothColorD; } BehaveRT::genericKernelFuncPointer createGeometryDRef() { return &createGeometryD; } BehaveRT::genericKernelFuncPointer dummyKernelRef() { return &dummyKernel; } BehaveRT_exportKernel(computeMouseDistance_kernel); void Drawable3D::Drawable3D_beforeKernelCall() { OpenSteerWrapper::OpenSteerWrapper_beforeKernelCall(); bind_field_texture(hDrawable3DFields.color, colorTex); bind_field_texture(hDrawable3DFields.smoothedColor, smoothedColorTex); } void Drawable3D::Drawable3D_afterKernelCall() { OpenSteerWrapper::OpenSteerWrapper_afterKernelCall(); unbind_field_texture(colorTex); unbind_field_texture(smoothedColorTex); } void Drawable3D::Drawable3D_copyFieldsToDevice() { CUDA_SAFE_CALL( cudaMemcpyToSymbol(dDrawable3DFields, &hDrawable3DFields, sizeof(Drawable3DFields)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(dDrawable3DParams, &hDrawable3DParams, sizeof(Drawable3DParams)) ); } }
712cb1c56b0a01245f797f3def56bbf3de66a975.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored int data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredInt { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. int* HostData; // Pointer to allocated memory on the host int* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredInt gpuInt; //----------------------------------------------------------------------------- // WorkbenchKernel: accept data from the host and perform operations on it //----------------------------------------------------------------------------- __global__ void WorkbenchKernel(int* devInts) { int i; // Print from the GPU to show that it sees a copy of the data uploaded // from the host. Note that warp synchronicity is no longer guaranteed // in CUDA 9.0 and later versions (we're now on 10.1), which creates the // need for all the __syncwarp() calls. 
if (threadIdx.x == 0) { printf("The device sees = [\n"); } __syncwarp(); for (i = 0; i < 16; i++) { if (threadIdx.x == i) { printf(" %3d", devInts[threadIdx.x]); } // Synchronization calls (__syncwarp across 32 threads in a warp or // __syncthreads across all threads in the block--this block just // happens to have only 32 threads) by definition require that all // affected threads reach them. Therefore, they cannot be called // from within conditional statements that exclude any relevant // threads, i.e. threadIdx.x == some number, seen above. __syncwarp(); } if (threadIdx.x == 0) { printf(" [index 0-15]\n"); } __syncwarp(); for (i = 16; i < 32; i++) { if (threadIdx.x == i) { printf(" %3d", devInts[threadIdx.x]); } __syncwarp(); } if (threadIdx.x == 0) { printf(" [index 16-31]\n];\n"); } __syncthreads(); // Do some work on the data that can then be inspected on the CPU. // Remember, (x & (2^N - 1)) is mod(x, 2^N). devInts[threadIdx.x] += (threadIdx.x & 7) * threadIdx.x; } //----------------------------------------------------------------------------- // CreateGpuInt: constructor function for allocating memory in a gpuInt // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed ot the device) //----------------------------------------------------------------------------- gpuInt CreateGpuInt(int len, int pin) { gpuInt G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. 
len = ((len + 127) / 128) * 128; if (pin == 1) { hipHostMalloc((void **)&G.HostData, len * sizeof(int), hipHostMallocMapped); } else { G.HostData = (int*)malloc(len * sizeof(int)); } hipMalloc((void **)&G.DevcData, len * sizeof(int)); memset(G.HostData, 0, len * sizeof(int)); hipMemset((void *)G.DevcData, 0, len * sizeof(int)); return G; } //----------------------------------------------------------------------------- // DestroyGpuInt: destructor function for freeing memory in a gpuInt // instance. //----------------------------------------------------------------------------- void DestroyGpuInt(gpuInt *G) { if (G->IsPinned == 1) { hipHostFree(G->HostData); } else { free(G->HostData); } hipFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuInt: upload an integer array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuInt(gpuInt *G) { hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuInt: download an integer array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuInt(gpuInt *G) { hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int), hipMemcpyHostToDevice); } //----------------------------------------------------------------------------- // main //----------------------------------------------------------------------------- int main() { int i, j; gpuInt myInts; // Create a small array of integers and populate it myInts = CreateGpuInt(32, 1); for (i = 0; i < 32; i++) { // Logical operations such as ((i & 3) == 0) will evaluate to zero or // one if they are true or false, and then feed into the arithmetic. 
myInts.HostData[i] = ((i & 3) == 0)*(32 - i) + ((i & 3) != 0)*i; } // Print the data as originally laid out on the host printf("Host data starts as = [\n"); j = 0; for (i = 0; i < 32; i++) { printf(" %3d", myInts.HostData[i]); j++; if (j == 16) { printf(" [index 0-15]\n"); } } printf(" [index 16-31]\n];\n"); // Upload data to the device UploadGpuInt(&myInts); // Launch the kernel in more than one block hipLaunchKernelGGL(( WorkbenchKernel), dim3(1), dim3(32), 0, 0, myInts.DevcData); // Download data back to the host DownloadGpuInt(&myInts); // Print the data as the host now sees it, following work on the GPU printf("Host data now reads = [\n"); j = 0; for (i = 0; i < 32; i++) { printf(" %3d", myInts.HostData[i]); j++; if (j == 16) { printf(" [index 0-15]\n"); } } printf(" [index 16-31]\n];\n"); // Device synchronization hipDeviceSynchronize(); return 0; }
712cb1c56b0a01245f797f3def56bbf3de66a975.cu
#include <stdio.h> #include <cuda.h> //----------------------------------------------------------------------------- // GpuMirroredInt: a struct holding mirrored int data on both the CPU and the // GPU. Functions below will operate on this struct // (because this isn't a workshop on C++) //----------------------------------------------------------------------------- struct GpuMirroredInt { int len; // Length of the array (again, this is not a C++ course) int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers. // In fact, if non-pinned memory is transferred to the // GPU from the host, a temporary allocation of pinned // memory will be created and then destroyed. Pinned // memory is not host-pageable, but the only performance // implication is that creating lots of pinned memory // may make it harder for the host OS to manage large // memory jobs. int* HostData; // Pointer to allocated memory on the host int* DevcData; // Pointer to allocated memory on the GPU. Note that the // host can know what the address of memory on the GPU // is, but it cannot simply de-reference that pointer // in host code. }; typedef struct GpuMirroredInt gpuInt; //----------------------------------------------------------------------------- // WorkbenchKernel: accept data from the host and perform operations on it //----------------------------------------------------------------------------- __global__ void WorkbenchKernel(int* devInts) { int i; // Print from the GPU to show that it sees a copy of the data uploaded // from the host. Note that warp synchronicity is no longer guaranteed // in CUDA 9.0 and later versions (we're now on 10.1), which creates the // need for all the __syncwarp() calls. 
if (threadIdx.x == 0) { printf("The device sees = [\n"); } __syncwarp(); for (i = 0; i < 16; i++) { if (threadIdx.x == i) { printf(" %3d", devInts[threadIdx.x]); } // Synchronization calls (__syncwarp across 32 threads in a warp or // __syncthreads across all threads in the block--this block just // happens to have only 32 threads) by definition require that all // affected threads reach them. Therefore, they cannot be called // from within conditional statements that exclude any relevant // threads, i.e. threadIdx.x == some number, seen above. __syncwarp(); } if (threadIdx.x == 0) { printf(" [index 0-15]\n"); } __syncwarp(); for (i = 16; i < 32; i++) { if (threadIdx.x == i) { printf(" %3d", devInts[threadIdx.x]); } __syncwarp(); } if (threadIdx.x == 0) { printf(" [index 16-31]\n];\n"); } __syncthreads(); // Do some work on the data that can then be inspected on the CPU. // Remember, (x & (2^N - 1)) is mod(x, 2^N). devInts[threadIdx.x] += (threadIdx.x & 7) * threadIdx.x; } //----------------------------------------------------------------------------- // CreateGpuInt: constructor function for allocating memory in a gpuInt // instance. // // Arguments: // len: the length of array to allocate // pin: flag to have the memory pinned (non-pageable on the host side // for optimal transfer speed ot the device) //----------------------------------------------------------------------------- gpuInt CreateGpuInt(int len, int pin) { gpuInt G; G.len = len; G.IsPinned = pin; // Now that the official length is recorded, upgrade the real length // to the next convenient multiple of 128, so as to always allocate // GPU memory in 512-byte blocks. This is for alignment purposes, // and keeping host to device transfers in line. 
len = ((len + 127) / 128) * 128; if (pin == 1) { cudaHostAlloc((void **)&G.HostData, len * sizeof(int), cudaHostAllocMapped); } else { G.HostData = (int*)malloc(len * sizeof(int)); } cudaMalloc((void **)&G.DevcData, len * sizeof(int)); memset(G.HostData, 0, len * sizeof(int)); cudaMemset((void *)G.DevcData, 0, len * sizeof(int)); return G; } //----------------------------------------------------------------------------- // DestroyGpuInt: destructor function for freeing memory in a gpuInt // instance. //----------------------------------------------------------------------------- void DestroyGpuInt(gpuInt *G) { if (G->IsPinned == 1) { cudaFreeHost(G->HostData); } else { free(G->HostData); } cudaFree(G->DevcData); } //----------------------------------------------------------------------------- // UploadGpuInt: upload an integer array from the host to the device. //----------------------------------------------------------------------------- void UploadGpuInt(gpuInt *G) { cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // DownloadGpuInt: download an integer array from the host to the device. //----------------------------------------------------------------------------- void DownloadGpuInt(gpuInt *G) { cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int), cudaMemcpyHostToDevice); } //----------------------------------------------------------------------------- // main //----------------------------------------------------------------------------- int main() { int i, j; gpuInt myInts; // Create a small array of integers and populate it myInts = CreateGpuInt(32, 1); for (i = 0; i < 32; i++) { // Logical operations such as ((i & 3) == 0) will evaluate to zero or // one if they are true or false, and then feed into the arithmetic. 
myInts.HostData[i] = ((i & 3) == 0)*(32 - i) + ((i & 3) != 0)*i; } // Print the data as originally laid out on the host printf("Host data starts as = [\n"); j = 0; for (i = 0; i < 32; i++) { printf(" %3d", myInts.HostData[i]); j++; if (j == 16) { printf(" [index 0-15]\n"); } } printf(" [index 16-31]\n];\n"); // Upload data to the device UploadGpuInt(&myInts); // Launch the kernel in more than one block WorkbenchKernel<<<1, 32>>>(myInts.DevcData); // Download data back to the host DownloadGpuInt(&myInts); // Print the data as the host now sees it, following work on the GPU printf("Host data now reads = [\n"); j = 0; for (i = 0; i < 32; i++) { printf(" %3d", myInts.HostData[i]); j++; if (j == 16) { printf(" [index 0-15]\n"); } } printf(" [index 16-31]\n];\n"); // Device synchronization cudaDeviceSynchronize(); return 0; }
a2824561005cd97c46e58dad1937a334ae21ff14.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> #include <ATen/core/EnableNamedTensor.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } 
THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { 
THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, 
Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } 
THCudaCheck(hipGetLastError()); } void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif 
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); 
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
a2824561005cd97c46e58dad1937a334ae21ff14.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> #include <ATen/core/EnableNamedTensor.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void 
THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if 
(!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, 
CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(crossKernel)(THCState 
*state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::add_out(out, at::Tensor(retainTensorImpl(src1)), 
at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == 
THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
26832c01c1be5f7fd93ba96daa7c6f63eb952871.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 8>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 1>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, hipStream_t stream); #pragma GCC diagnostic pop #endif
26832c01c1be5f7fd93ba96daa7c6f63eb952871.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 8>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 1>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, cudaStream_t stream); #pragma GCC diagnostic pop #endif
2b766c0d19126c0d48aeef1e3bf91ccf48aabc40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "softmaxcrossentropy_impl.h" #include "core/providers/cuda/cuda_common.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SoftMaxCrossEntropy( const T* log_prob_data, const T* label_data, CUDA_LONG NORMALIZE_FACTOR, T* output_data, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = -log_prob_data[id] * label_data[id] / NORMALIZE_FACTOR; } template <typename T> void SoftMaxCrossEntropyImpl( const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG NORMALIZE_FACTOR = static_cast<CUDA_LONG>(normalize_factor); hipLaunchKernelGGL(( _SoftMaxCrossEntropy<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, log_prob, label, NORMALIZE_FACTOR, output_data, N); } #define SPECIALIZED_IMPL_SoftMaxEntropyImpl(T) \ template void SoftMaxCrossEntropyImpl( \ const T* log_prob, \ const T* label, \ size_t normalize_factor, \ T* output_data, \ size_t count); SPECIALIZED_IMPL_SoftMaxEntropyImpl(float) template <typename T> __global__ void _SoftMaxCrossEntropyGrad( const T* dY, const T* log_prob, const T* label, CUDA_LONG NORMALIZE_FACTOR, T* output_data, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = (_Exp(log_prob[id]) - label[id]) * (*dY) / NORMALIZE_FACTOR; } template <typename T> void SoftMaxCrossEntropyGradImpl( const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG 
NORMALIZE_FACTOR = static_cast<CUDA_LONG>(normalize_factor); hipLaunchKernelGGL(( _SoftMaxCrossEntropyGrad<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, dY, log_prob, label, NORMALIZE_FACTOR, output_data, N); } #define SPECIALIZED_IMPL_SoftMaxEntropyGradImpl(T) \ template void SoftMaxCrossEntropyGradImpl( \ const T* dY, \ const T* log_prob, \ const T* label, \ size_t normalize_factor, \ T* output_data, \ size_t count); SPECIALIZED_IMPL_SoftMaxEntropyGradImpl(float) template <typename T, typename Tin> __global__ void _SparseSoftmaxCrossEntropy( const T* log_prob_data, const Tin* label_data, const T* normalize_factor_data, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N); CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < D); if (*normalize_factor_data == 0) { output_data[i] = 0; } else { output_data[i] = -log_prob_data[i * D + label_data[i]] / (*normalize_factor_data); } } template <typename T, typename Tin> __global__ void _WeightedSparseSoftmaxCrossEntropy( const T* log_prob_data, const Tin* label_data, const T* weight_data, const T* normalize_factor_data, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N); CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < D); if (*normalize_factor_data == 0) { output_data[i] = 0; } else { output_data[i] = -log_prob_data[i * D + label_data[i]] * weight_data[i] / (*normalize_factor_data); } } template <typename T, typename Tin> void SparseSoftmaxCrossEntropyImpl( const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG D = static_cast<CUDA_LONG>(label_depth); if (weight) { hipLaunchKernelGGL(( _WeightedSparseSoftmaxCrossEntropy<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, log_prob, 
label, weight, normalize_factor, output_data, N, D); } else { hipLaunchKernelGGL(( _SparseSoftmaxCrossEntropy<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, log_prob, label, normalize_factor, output_data, N, D); } } #define SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(T, Tin) \ template void SparseSoftmaxCrossEntropyImpl( \ const T* log_prob, \ const Tin* label, \ const T* weight, \ const T* normalize_factor, \ T* output_data, \ size_t count, \ size_t label_depth); SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(float, int32_t) SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(float, int64_t) template <typename T, typename Tin> __global__ void _SparseSoftmaxCrossEntropyGrad( const T* dY, const T* log_prob, const Tin* label, const T* normalize_factor, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N * D); int row = i / D; int d = i % D; if (*normalize_factor == 0) { output_data[i] = 0; } else { output_data[i] = (*dY) * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor); } } template <typename T, typename Tin> __global__ void _WeightedSparseSoftmaxCrossEntropyGrad( const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N * D); int row = i / D; int d = i % D; if (*normalize_factor == 0) { output_data[i] = 0; } else { output_data[i] = (*dY) * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor); } } template <typename T, typename Tin> void SparseSoftmaxCrossEntropyGradImpl( const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth) { CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG D = static_cast<CUDA_LONG>(label_depth); int blocksPerGrid = (int)(ceil(static_cast<float>(N * D) / GridDim::maxThreadsPerBlock)); if (weight) { hipLaunchKernelGGL(( 
_WeightedSparseSoftmaxCrossEntropyGrad<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, dY, log_prob, label, weight, normalize_factor, output_data, N, D); } else { hipLaunchKernelGGL(( _SparseSoftmaxCrossEntropyGrad<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, dY, log_prob, label, normalize_factor, output_data, N, D); } } #define SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(T, Tin) \ template void SparseSoftmaxCrossEntropyGradImpl( \ const T* dY, \ const T* log_prob, \ const Tin* label, \ const T* weight, \ const T* normalize_factor, \ T* output_data, \ size_t count, \ size_t label_depth); SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(float, int32_t) SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(float, int64_t) } // namespace cuda } // namespace onnxruntime
2b766c0d19126c0d48aeef1e3bf91ccf48aabc40.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "softmaxcrossentropy_impl.h" #include "core/providers/cuda/cuda_common.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _SoftMaxCrossEntropy( const T* log_prob_data, const T* label_data, CUDA_LONG NORMALIZE_FACTOR, T* output_data, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = -log_prob_data[id] * label_data[id] / NORMALIZE_FACTOR; } template <typename T> void SoftMaxCrossEntropyImpl( const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG NORMALIZE_FACTOR = static_cast<CUDA_LONG>(normalize_factor); _SoftMaxCrossEntropy<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( log_prob, label, NORMALIZE_FACTOR, output_data, N); } #define SPECIALIZED_IMPL_SoftMaxEntropyImpl(T) \ template void SoftMaxCrossEntropyImpl( \ const T* log_prob, \ const T* label, \ size_t normalize_factor, \ T* output_data, \ size_t count); SPECIALIZED_IMPL_SoftMaxEntropyImpl(float) template <typename T> __global__ void _SoftMaxCrossEntropyGrad( const T* dY, const T* log_prob, const T* label, CUDA_LONG NORMALIZE_FACTOR, T* output_data, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = (_Exp(log_prob[id]) - label[id]) * (*dY) / NORMALIZE_FACTOR; } template <typename T> void SoftMaxCrossEntropyGradImpl( const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG NORMALIZE_FACTOR = static_cast<CUDA_LONG>(normalize_factor); _SoftMaxCrossEntropyGrad<T><<<blocksPerGrid, 
GridDim::maxThreadsPerBlock, 0>>>( dY, log_prob, label, NORMALIZE_FACTOR, output_data, N); } #define SPECIALIZED_IMPL_SoftMaxEntropyGradImpl(T) \ template void SoftMaxCrossEntropyGradImpl( \ const T* dY, \ const T* log_prob, \ const T* label, \ size_t normalize_factor, \ T* output_data, \ size_t count); SPECIALIZED_IMPL_SoftMaxEntropyGradImpl(float) template <typename T, typename Tin> __global__ void _SparseSoftmaxCrossEntropy( const T* log_prob_data, const Tin* label_data, const T* normalize_factor_data, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N); CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < D); if (*normalize_factor_data == 0) { output_data[i] = 0; } else { output_data[i] = -log_prob_data[i * D + label_data[i]] / (*normalize_factor_data); } } template <typename T, typename Tin> __global__ void _WeightedSparseSoftmaxCrossEntropy( const T* log_prob_data, const Tin* label_data, const T* weight_data, const T* normalize_factor_data, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N); CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < D); if (*normalize_factor_data == 0) { output_data[i] = 0; } else { output_data[i] = -log_prob_data[i * D + label_data[i]] * weight_data[i] / (*normalize_factor_data); } } template <typename T, typename Tin> void SparseSoftmaxCrossEntropyImpl( const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG D = static_cast<CUDA_LONG>(label_depth); if (weight) { _WeightedSparseSoftmaxCrossEntropy<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( log_prob, label, weight, normalize_factor, output_data, N, D); } else { _SparseSoftmaxCrossEntropy<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( log_prob, label, 
normalize_factor, output_data, N, D); } } #define SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(T, Tin) \ template void SparseSoftmaxCrossEntropyImpl( \ const T* log_prob, \ const Tin* label, \ const T* weight, \ const T* normalize_factor, \ T* output_data, \ size_t count, \ size_t label_depth); SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(float, int32_t) SPECIALIZED_IMPL_SparseSoftMaxEntropyImpl(float, int64_t) template <typename T, typename Tin> __global__ void _SparseSoftmaxCrossEntropyGrad( const T* dY, const T* log_prob, const Tin* label, const T* normalize_factor, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N * D); int row = i / D; int d = i % D; if (*normalize_factor == 0) { output_data[i] = 0; } else { output_data[i] = (*dY) * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor); } } template <typename T, typename Tin> __global__ void _WeightedSparseSoftmaxCrossEntropyGrad( const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, CUDA_LONG N, CUDA_LONG D) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N * D); int row = i / D; int d = i % D; if (*normalize_factor == 0) { output_data[i] = 0; } else { output_data[i] = (*dY) * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor); } } template <typename T, typename Tin> void SparseSoftmaxCrossEntropyGradImpl( const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth) { CUDA_LONG N = static_cast<CUDA_LONG>(count); CUDA_LONG D = static_cast<CUDA_LONG>(label_depth); int blocksPerGrid = (int)(ceil(static_cast<float>(N * D) / GridDim::maxThreadsPerBlock)); if (weight) { _WeightedSparseSoftmaxCrossEntropyGrad<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( dY, log_prob, label, weight, normalize_factor, output_data, N, D); } else { _SparseSoftmaxCrossEntropyGrad<T, Tin><<<blocksPerGrid, 
GridDim::maxThreadsPerBlock, 0>>>( dY, log_prob, label, normalize_factor, output_data, N, D); } } #define SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(T, Tin) \ template void SparseSoftmaxCrossEntropyGradImpl( \ const T* dY, \ const T* log_prob, \ const Tin* label, \ const T* weight, \ const T* normalize_factor, \ T* output_data, \ size_t count, \ size_t label_depth); SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(float, int32_t) SPECIALIZED_IMPL_SparseSoftMaxEntropyGradImpl(float, int64_t) } // namespace cuda } // namespace onnxruntime
050c1decb4aa033a98c9b4a2cb33ca27af749a06.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void 
SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, 
Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto 
tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo, n); 
SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> 
_CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) 
failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template <typename X, typename Y> Y 
SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::exec(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } template <typename X, typename Y> template<typename OpType> Y SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { // } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::exec(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
050c1decb4aa033a98c9b4a2cb33ca27af749a06.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <cuda.h> #include <cuda_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong 
numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = 
static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = 
threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo, n); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } 
sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, 
Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, 
Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); summaryStatsReduceT<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template <typename X, typename Y> Y SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::exec(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } template <typename X, typename Y> template<typename OpType> Y SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { // } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::exec(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int 
dimensionLength) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
329e6c20713e2afcc9f26ab45a5b64924da8b83a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix multiplication: C = A * B. Host side code. Author: Naga Kandasamy Date modified: 03/07/2017 */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes, kernels #include "matrixmul_kernel.hip" extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix); Matrix AllocateMatrix(int, int, int); void CopyToDeviceMatrix(Matrix, const Matrix); void CopyFromDeviceMatrix(Matrix, const Matrix); void FreeDeviceMatrix(Matrix *); void FreeMatrix(Matrix *); void MatrixMulOnDevice(const Matrix, const Matrix, Matrix); void checkCUDAError(const char *); int checkResults(float *, float *, int, float); int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; srand(time(NULL)); // Allocate and initialize the matrices M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); // M * N on the device printf("Performing matrix multiplication on the GPU. \n"); MatrixMulOnDevice(M, N, P); printf("GPU computation complete. \n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); printf("Performing matrix multiplication on the CPU. \n"); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete. \n"); /* Check if the device result is equivalent to the expected solution. */ int num_elements = P.height*P.width; int status = checkResults(reference.elements, P.elements, num_elements, 0.001f); printf("Test %s\n", (1 == status) ? 
"PASSED" : "FAILED"); // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); /* Bind Md and Nd to 1D textures. Note: the maximum width for 1D texture reference bound to linear memory varies with the GPU generation and compute capability. Currently it is set to 2^{27} elements. */ // hipBindTexture(NULL, M_on_tex, Md.elements, M.width * M.height * sizeof(float)); // hipBindTexture(NULL, N_on_tex, Nd.elements, N.width * N.height * sizeof(float)); /* Bind Md and Nd to 2D textures. Note: as with 1D textures, there is a maximum width and height for 2D texture reference bound to a CUDA array or to a linear memory. */ hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(NULL, M_on_tex_2D, Md.elements, desc, M.width, M.height, M.width * sizeof(float)); hipBindTexture2D(NULL, N_on_tex_2D, Nd.elements, desc, N.width, N.height, N.width * sizeof(float)); // Setup the execution configuration dim3 threads(TILE_SIZE, TILE_SIZE); dim3 grid((Pd.width + TILE_SIZE - 1)/TILE_SIZE, (Pd.height + TILE_SIZE - 1)/TILE_SIZE); struct timeval start, stop; gettimeofday(&start, NULL); // Execute the kernel // MatrixMulKernel_vanilla<<< grid, threads >>>(Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); // MatrixMulKernel_1Dtex<<< grid, threads >>>(Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); hipLaunchKernelGGL(( MatrixMulKernel_2Dtex), dim3(grid), dim3(threads) , 0, 0, Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); hipDeviceSynchronize(); // check if kernel execution generated an error checkCUDAError("Error in kernel"); gettimeofday(&stop, NULL); printf("Execution time = %fs. 
\n",\ (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000)); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Unbind texture references // hipUnbindTexture(M_on_tex); // hipUnbindTexture(N_on_tex); hipUnbindTexture(M_on_tex_2D); hipUnbindTexture(N_on_tex_2D); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++){ M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
329e6c20713e2afcc9f26ab45a5b64924da8b83a.cu
/* Matrix multiplication: C = A * B. Host side code. Author: Naga Kandasamy Date modified: 03/07/2017 */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes, kernels #include "matrixmul_kernel.cu" extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix); Matrix AllocateMatrix(int, int, int); void CopyToDeviceMatrix(Matrix, const Matrix); void CopyFromDeviceMatrix(Matrix, const Matrix); void FreeDeviceMatrix(Matrix *); void FreeMatrix(Matrix *); void MatrixMulOnDevice(const Matrix, const Matrix, Matrix); void checkCUDAError(const char *); int checkResults(float *, float *, int, float); int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; srand(time(NULL)); // Allocate and initialize the matrices M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); // M * N on the device printf("Performing matrix multiplication on the GPU. \n"); MatrixMulOnDevice(M, N, P); printf("GPU computation complete. \n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); printf("Performing matrix multiplication on the CPU. \n"); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete. \n"); /* Check if the device result is equivalent to the expected solution. */ int num_elements = P.height*P.width; int status = checkResults(reference.elements, P.elements, num_elements, 0.001f); printf("Test %s\n", (1 == status) ? 
"PASSED" : "FAILED"); // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); /* Bind Md and Nd to 1D textures. Note: the maximum width for 1D texture reference bound to linear memory varies with the GPU generation and compute capability. Currently it is set to 2^{27} elements. */ // cudaBindTexture(NULL, M_on_tex, Md.elements, M.width * M.height * sizeof(float)); // cudaBindTexture(NULL, N_on_tex, Nd.elements, N.width * N.height * sizeof(float)); /* Bind Md and Nd to 2D textures. Note: as with 1D textures, there is a maximum width and height for 2D texture reference bound to a CUDA array or to a linear memory. */ cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(NULL, M_on_tex_2D, Md.elements, desc, M.width, M.height, M.width * sizeof(float)); cudaBindTexture2D(NULL, N_on_tex_2D, Nd.elements, desc, N.width, N.height, N.width * sizeof(float)); // Setup the execution configuration dim3 threads(TILE_SIZE, TILE_SIZE); dim3 grid((Pd.width + TILE_SIZE - 1)/TILE_SIZE, (Pd.height + TILE_SIZE - 1)/TILE_SIZE); struct timeval start, stop; gettimeofday(&start, NULL); // Execute the kernel // MatrixMulKernel_vanilla<<< grid, threads >>>(Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); // MatrixMulKernel_1Dtex<<< grid, threads >>>(Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); MatrixMulKernel_2Dtex<<< grid, threads >>>(Pd.elements, Md.elements, Nd.elements, MATRIX_SIZE); cudaThreadSynchronize(); // check if kernel execution generated an error checkCUDAError("Error in kernel"); gettimeofday(&stop, NULL); printf("Execution time = %fs. 
\n",\ (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000)); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Unbind texture references // cudaUnbindTexture(M_on_tex); // cudaUnbindTexture(N_on_tex); cudaUnbindTexture(M_on_tex_2D); cudaUnbindTexture(N_on_tex_2D); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++){ M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
8ab608ac3531deb1cfac94335bce38f54d1dd408.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gain.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; float rGain = 1; float gGain = 1; float bGain = 1; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gain), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rGain,gGain,bGain,input,output); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gain), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rGain,gGain,bGain,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gain), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,rGain,gGain,bGain,input,output); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8ab608ac3531deb1cfac94335bce38f54d1dd408.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gain.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; float rGain = 1; float gGain = 1; float bGain = 1; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gain<<<gridBlock,threadBlock>>>(width,height,rGain,gGain,bGain,input,output); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gain<<<gridBlock,threadBlock>>>(width,height,rGain,gGain,bGain,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gain<<<gridBlock,threadBlock>>>(width,height,rGain,gGain,bGain,input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c86c31041d9a5c437704f26da3330b2034c6e43e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * kernels.cu * * Created on: Jun 20, 2017 * Author: kbmod-usr */ #ifndef KERNELS_CU_ #define KERNELS_CU_ #include "common.h" #include "PointSpreadFunc.h" #include <helper_cuda.h> #include <stdio.h> namespace kbmod { /* * Device kernel that convolves the provided image with the psf */ __global__ void convolvePSF(int width, int height, float *sourceImage, float *resultImage, float *psf, int psfRad, int psfDim, float psfSum, float maskFlag) { // Find bounds of convolution area const int x = blockIdx.x*CONV_THREAD_DIM+threadIdx.x; const int y = blockIdx.y*CONV_THREAD_DIM+threadIdx.y; if (x < 0 || x > width-1 || y < 0 || y > height-1) return; const int minX = max(x-psfRad, 0); const int minY = max(y-psfRad, 0); const int maxX = min(x+psfRad, width-1); const int maxY = min(y+psfRad, height-1); // Read kernel float sum = 0.0; float psfPortion = 0.0; float center = sourceImage[y*width+x]; if (center != MASK_FLAG) { for (int j=minY; j<=maxY; j++) { // #pragma unroll for (int i=minX; i<=maxX; i++) { float currentPixel = sourceImage[j*width+i]; if (currentPixel != MASK_FLAG) { float currentPSF = psf[(j-minY)*psfDim+i-minX]; psfPortion += currentPSF; sum += currentPixel * currentPSF; } } } resultImage[y*width+x] = (sum*psfSum)/psfPortion; } else { // Leave masked pixel alone (these could be replaced here with zero) resultImage[y*width+x] = center; // 0.0 } } extern "C" void deviceConvolve(float *sourceImg, float *resultImg, int width, int height, PointSpreadFunc *PSF) { // Pointers to device memory // float *deviceKernel; float *deviceSourceImg; float *deviceResultImg; long pixelsPerImage = width*height; dim3 blocks(width/CONV_THREAD_DIM+1,height/CONV_THREAD_DIM+1); dim3 threads(CONV_THREAD_DIM,CONV_THREAD_DIM); // Allocate Device memory checkCudaErrors(hipMalloc((void **)&deviceKernel, sizeof(float)*PSF->getSize())); checkCudaErrors(hipMalloc((void **)&deviceSourceImg, 
sizeof(float)*pixelsPerImage)); checkCudaErrors(hipMalloc((void **)&deviceResultImg, sizeof(float)*pixelsPerImage)); checkCudaErrors(hipMemcpy(deviceKernel, PSF->kernelData(), sizeof(float)*PSF->getSize(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(deviceSourceImg, sourceImg, sizeof(float)*pixelsPerImage, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( convolvePSF), dim3(blocks), dim3(threads), 0, 0, width, height, deviceSourceImg, deviceResultImg, deviceKernel, PSF->getRadius(), PSF->getDim(), PSF->getSum(), MASK_FLAG); checkCudaErrors(hipMemcpy(resultImg, deviceResultImg, sizeof(float)*pixelsPerImage, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(deviceKernel)); checkCudaErrors(hipFree(deviceSourceImg)); checkCudaErrors(hipFree(deviceResultImg)); } /* * Searches through images (represented as a flat array of floats) looking for most likely * trajectories in the given list. Outputs a results image of best trajectories. Returns a * fixed number of results per pixel specified by RESULTS_PER_PIXEL */ __global__ void searchImages(int trajectoryCount, int width, int height, int imageCount, float *psiPhiImages, trajectory *trajectories, trajectory *results, float *imgTimes) { // Get trajectory origin const unsigned short x = blockIdx.x*THREAD_DIM_X+threadIdx.x; const unsigned short y = blockIdx.y*THREAD_DIM_Y+threadIdx.y; trajectory best[RESULTS_PER_PIXEL]; for (int r=0; r<RESULTS_PER_PIXEL; ++r) { best[r].lh = -1.0; } __shared__ float sImgTimes[256]; int idx = threadIdx.x*THREAD_DIM_X+threadIdx.y; if (idx<imageCount) sImgTimes[idx] = imgTimes[idx]; // Give up on any trajectories starting outside the image if (x >= width || y >= height) { return; } const unsigned int pixelsPerImage = width*height; // For each trajectory we'd like to search for (int t=0; t<trajectoryCount; ++t) { trajectory currentT; currentT.x = x; currentT.y = y; currentT.xVel = trajectories[t].xVel; currentT.yVel = trajectories[t].yVel; currentT.obsCount = 0; float psiSum = 0.0; float phiSum 
= 0.0; // Loop over each image and sample the appropriate pixel for (int i=0; i<imageCount; ++i) { float cTime = sImgTimes[i]; int currentX = x + int(currentT.xVel*cTime+0.5); int currentY = y + int(currentT.yVel*cTime+0.5); // Test if trajectory goes out of image bounds // Branching could be avoided here by setting a // black image border and clamping coordinates if (currentX >= width || currentY >= height || currentX < 0 || currentY < 0) { // Penalize trajctories that leave edge //psiSum += -0.1; continue; } unsigned int pixel = (pixelsPerImage*i + currentY*width + currentX); //float cPsi = psiPhiImages[pixel]; //float cPhi = psiPhiImages[pixel+1]; float2 cPsiPhi = reinterpret_cast<float2*>(psiPhiImages)[pixel]; if (cPsiPhi.x == MASK_FLAG) continue; currentT.obsCount++; psiSum += cPsiPhi.x;// < MASK_FLAG/2 /*== MASK_FLAG* / ? 0.0 : cPsiPhi.x;//min(cPsi,0.3); phiSum += cPsiPhi.y;// < MASK_FLAG/2 /*== MASK_FLAG* / ? 0.0 : cPsiPhi.y; //if (psiSum <= 0.0 && i>4) break; } // Just in case a phiSum is zero //phiSum += phiSum*1.0005+0.001; currentT.lh = psiSum/sqrt(phiSum); currentT.flux = /*2.0*fluxPix**/ psiSum/phiSum; trajectory temp; for (int r=0; r<RESULTS_PER_PIXEL; ++r) { if ( currentT.lh > best[r].lh ) { temp = best[r]; best[r] = currentT; currentT = temp; } } } for (int r=0; r<RESULTS_PER_PIXEL; ++r) { results[ (y*width + x)*RESULTS_PER_PIXEL + r ] = best[r]; } } __device__ float2 readPixel(float* img, int x, int y, int width, int height) { float2 p; int i = y*width+x; p.x = img[i]; p.y = img[i+1]; return p; } extern "C" void deviceSearch(int trajCount, int imageCount, int psiPhiSize, int resultsCount, trajectory * trajectoriesToSearch, trajectory *bestTrajects, float *imageTimes, float *interleavedPsiPhi, int width, int height) { // Allocate Device memory trajectory *deviceTests; float *deviceImgTimes; float *devicePsiPhi; trajectory *deviceSearchResults; checkCudaErrors(hipMalloc((void **)&deviceTests, sizeof(trajectory)*trajCount)); 
checkCudaErrors(hipMalloc((void **)&deviceImgTimes, sizeof(float)*imageCount)); checkCudaErrors(hipMalloc((void **)&devicePsiPhi, sizeof(float)*psiPhiSize)); checkCudaErrors(hipMalloc((void **)&deviceSearchResults, sizeof(trajectory)*resultsCount)); // Copy trajectories to search checkCudaErrors(hipMemcpy(deviceTests, trajectoriesToSearch, sizeof(trajectory)*trajCount, hipMemcpyHostToDevice)); // Copy image times checkCudaErrors(hipMemcpy(deviceImgTimes, imageTimes, sizeof(float)*imageCount, hipMemcpyHostToDevice)); // Copy interleaved buffer of psi and phi images checkCudaErrors(hipMemcpy(devicePsiPhi, interleavedPsiPhi, sizeof(float)*psiPhiSize, hipMemcpyHostToDevice)); //dim3 blocks(width,height); dim3 blocks(width/THREAD_DIM_X+1,height/THREAD_DIM_Y+1); dim3 threads(THREAD_DIM_X,THREAD_DIM_Y); // Launch Search hipLaunchKernelGGL(( searchImages), dim3(blocks), dim3(threads), 0, 0, trajCount, width, height, imageCount, devicePsiPhi, deviceTests, deviceSearchResults, deviceImgTimes); // Read back results checkCudaErrors(hipMemcpy(bestTrajects, deviceSearchResults, sizeof(trajectory)*resultsCount, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(deviceTests)); checkCudaErrors(hipFree(deviceImgTimes)); checkCudaErrors(hipFree(deviceSearchResults)); checkCudaErrors(hipFree(devicePsiPhi)); } } /* namespace kbmod */ #endif /* KERNELS_CU_ */
c86c31041d9a5c437704f26da3330b2034c6e43e.cu
/* * kernels.cu * * Created on: Jun 20, 2017 * Author: kbmod-usr */ #ifndef KERNELS_CU_ #define KERNELS_CU_ #include "common.h" #include "PointSpreadFunc.h" #include <helper_cuda.h> #include <stdio.h> namespace kbmod { /* * Device kernel that convolves the provided image with the psf */ __global__ void convolvePSF(int width, int height, float *sourceImage, float *resultImage, float *psf, int psfRad, int psfDim, float psfSum, float maskFlag) { // Find bounds of convolution area const int x = blockIdx.x*CONV_THREAD_DIM+threadIdx.x; const int y = blockIdx.y*CONV_THREAD_DIM+threadIdx.y; if (x < 0 || x > width-1 || y < 0 || y > height-1) return; const int minX = max(x-psfRad, 0); const int minY = max(y-psfRad, 0); const int maxX = min(x+psfRad, width-1); const int maxY = min(y+psfRad, height-1); // Read kernel float sum = 0.0; float psfPortion = 0.0; float center = sourceImage[y*width+x]; if (center != MASK_FLAG) { for (int j=minY; j<=maxY; j++) { // #pragma unroll for (int i=minX; i<=maxX; i++) { float currentPixel = sourceImage[j*width+i]; if (currentPixel != MASK_FLAG) { float currentPSF = psf[(j-minY)*psfDim+i-minX]; psfPortion += currentPSF; sum += currentPixel * currentPSF; } } } resultImage[y*width+x] = (sum*psfSum)/psfPortion; } else { // Leave masked pixel alone (these could be replaced here with zero) resultImage[y*width+x] = center; // 0.0 } } extern "C" void deviceConvolve(float *sourceImg, float *resultImg, int width, int height, PointSpreadFunc *PSF) { // Pointers to device memory // float *deviceKernel; float *deviceSourceImg; float *deviceResultImg; long pixelsPerImage = width*height; dim3 blocks(width/CONV_THREAD_DIM+1,height/CONV_THREAD_DIM+1); dim3 threads(CONV_THREAD_DIM,CONV_THREAD_DIM); // Allocate Device memory checkCudaErrors(cudaMalloc((void **)&deviceKernel, sizeof(float)*PSF->getSize())); checkCudaErrors(cudaMalloc((void **)&deviceSourceImg, sizeof(float)*pixelsPerImage)); checkCudaErrors(cudaMalloc((void **)&deviceResultImg, 
sizeof(float)*pixelsPerImage)); checkCudaErrors(cudaMemcpy(deviceKernel, PSF->kernelData(), sizeof(float)*PSF->getSize(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(deviceSourceImg, sourceImg, sizeof(float)*pixelsPerImage, cudaMemcpyHostToDevice)); convolvePSF<<<blocks, threads>>> (width, height, deviceSourceImg, deviceResultImg, deviceKernel, PSF->getRadius(), PSF->getDim(), PSF->getSum(), MASK_FLAG); checkCudaErrors(cudaMemcpy(resultImg, deviceResultImg, sizeof(float)*pixelsPerImage, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(deviceKernel)); checkCudaErrors(cudaFree(deviceSourceImg)); checkCudaErrors(cudaFree(deviceResultImg)); } /* * Searches through images (represented as a flat array of floats) looking for most likely * trajectories in the given list. Outputs a results image of best trajectories. Returns a * fixed number of results per pixel specified by RESULTS_PER_PIXEL */ __global__ void searchImages(int trajectoryCount, int width, int height, int imageCount, float *psiPhiImages, trajectory *trajectories, trajectory *results, float *imgTimes) { // Get trajectory origin const unsigned short x = blockIdx.x*THREAD_DIM_X+threadIdx.x; const unsigned short y = blockIdx.y*THREAD_DIM_Y+threadIdx.y; trajectory best[RESULTS_PER_PIXEL]; for (int r=0; r<RESULTS_PER_PIXEL; ++r) { best[r].lh = -1.0; } __shared__ float sImgTimes[256]; int idx = threadIdx.x*THREAD_DIM_X+threadIdx.y; if (idx<imageCount) sImgTimes[idx] = imgTimes[idx]; // Give up on any trajectories starting outside the image if (x >= width || y >= height) { return; } const unsigned int pixelsPerImage = width*height; // For each trajectory we'd like to search for (int t=0; t<trajectoryCount; ++t) { trajectory currentT; currentT.x = x; currentT.y = y; currentT.xVel = trajectories[t].xVel; currentT.yVel = trajectories[t].yVel; currentT.obsCount = 0; float psiSum = 0.0; float phiSum = 0.0; // Loop over each image and sample the appropriate pixel for (int i=0; i<imageCount; ++i) { float cTime = 
sImgTimes[i]; int currentX = x + int(currentT.xVel*cTime+0.5); int currentY = y + int(currentT.yVel*cTime+0.5); // Test if trajectory goes out of image bounds // Branching could be avoided here by setting a // black image border and clamping coordinates if (currentX >= width || currentY >= height || currentX < 0 || currentY < 0) { // Penalize trajctories that leave edge //psiSum += -0.1; continue; } unsigned int pixel = (pixelsPerImage*i + currentY*width + currentX); //float cPsi = psiPhiImages[pixel]; //float cPhi = psiPhiImages[pixel+1]; float2 cPsiPhi = reinterpret_cast<float2*>(psiPhiImages)[pixel]; if (cPsiPhi.x == MASK_FLAG) continue; currentT.obsCount++; psiSum += cPsiPhi.x;// < MASK_FLAG/2 /*== MASK_FLAG* / ? 0.0 : cPsiPhi.x;//min(cPsi,0.3); phiSum += cPsiPhi.y;// < MASK_FLAG/2 /*== MASK_FLAG* / ? 0.0 : cPsiPhi.y; //if (psiSum <= 0.0 && i>4) break; } // Just in case a phiSum is zero //phiSum += phiSum*1.0005+0.001; currentT.lh = psiSum/sqrt(phiSum); currentT.flux = /*2.0*fluxPix**/ psiSum/phiSum; trajectory temp; for (int r=0; r<RESULTS_PER_PIXEL; ++r) { if ( currentT.lh > best[r].lh ) { temp = best[r]; best[r] = currentT; currentT = temp; } } } for (int r=0; r<RESULTS_PER_PIXEL; ++r) { results[ (y*width + x)*RESULTS_PER_PIXEL + r ] = best[r]; } } __device__ float2 readPixel(float* img, int x, int y, int width, int height) { float2 p; int i = y*width+x; p.x = img[i]; p.y = img[i+1]; return p; } extern "C" void deviceSearch(int trajCount, int imageCount, int psiPhiSize, int resultsCount, trajectory * trajectoriesToSearch, trajectory *bestTrajects, float *imageTimes, float *interleavedPsiPhi, int width, int height) { // Allocate Device memory trajectory *deviceTests; float *deviceImgTimes; float *devicePsiPhi; trajectory *deviceSearchResults; checkCudaErrors(cudaMalloc((void **)&deviceTests, sizeof(trajectory)*trajCount)); checkCudaErrors(cudaMalloc((void **)&deviceImgTimes, sizeof(float)*imageCount)); checkCudaErrors(cudaMalloc((void **)&devicePsiPhi, 
sizeof(float)*psiPhiSize)); checkCudaErrors(cudaMalloc((void **)&deviceSearchResults, sizeof(trajectory)*resultsCount)); // Copy trajectories to search checkCudaErrors(cudaMemcpy(deviceTests, trajectoriesToSearch, sizeof(trajectory)*trajCount, cudaMemcpyHostToDevice)); // Copy image times checkCudaErrors(cudaMemcpy(deviceImgTimes, imageTimes, sizeof(float)*imageCount, cudaMemcpyHostToDevice)); // Copy interleaved buffer of psi and phi images checkCudaErrors(cudaMemcpy(devicePsiPhi, interleavedPsiPhi, sizeof(float)*psiPhiSize, cudaMemcpyHostToDevice)); //dim3 blocks(width,height); dim3 blocks(width/THREAD_DIM_X+1,height/THREAD_DIM_Y+1); dim3 threads(THREAD_DIM_X,THREAD_DIM_Y); // Launch Search searchImages<<<blocks, threads>>> (trajCount, width, height, imageCount, devicePsiPhi, deviceTests, deviceSearchResults, deviceImgTimes); // Read back results checkCudaErrors(cudaMemcpy(bestTrajects, deviceSearchResults, sizeof(trajectory)*resultsCount, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(deviceTests)); checkCudaErrors(cudaFree(deviceImgTimes)); checkCudaErrors(cudaFree(deviceSearchResults)); checkCudaErrors(cudaFree(devicePsiPhi)); } } /* namespace kbmod */ #endif /* KERNELS_CU_ */
d7fb75cf5e3cbddd108dfcfc4137b6b9ccca8461.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ /* entrySearch * * An algorithm to find the minimum or maximum value of an array using a GPU. * This program searches an array of reals of arbitrary length and returns the * minimum or maximum value of the array. * * Adam J. Sierakowski, JHU/APL 2011 */ #include "entrySearch.h" #include <hip/hip_runtime.h> #define MAXTHREADS 128 #define MAXBLOCKS 64 /* A bitwise function to determine the maximum exponent x that satisfies the * inequality 2^x < n. */ int floorLog2(unsigned int n) { int pos = 0; if (n >= 1<<16) { n >>= 16; pos += 16; } if (n >= 1<< 8) { n >>= 8; pos += 8; } if (n >= 1<< 4) { n >>= 4; pos += 4; } if (n >= 1<< 2) { n >>= 2; pos += 2; } if (n >= 1<< 1) { pos += 1; } return ((n == 0) ? (-1) : pos); } /* A bitwise function to determine the minimum number n that satisfies the * inequality n > x, where n = 2^a for arbitrary a. 
*/ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* A function to determine the proper number of blocks and threads into which * the array should be split when parallelized on the GPU. */ void getNumBlocksAndThreads(int n, int &blocks, int &threads) { threads = (n < MAXTHREADS * 2) ? nextPow2((n + 1) / 2): MAXTHREADS; blocks = (n + threads * 2 - 1) / (threads * 2); } /* A function to create random input data on CPU for program testing. During * generation, the minimum value is recorded and returned for use in verifying * the GPU test result. */ void randArrGen(int size, real *arr, real* minmax) { srand(time(NULL)); for(int i=0; i<size; i++) { arr[i] = (rand() % size) - size / 2; if (arr[i] < minmax[0]) { minmax[0] = arr[i]; } if (arr[i] > minmax[1]) { minmax[1] = arr[i]; } } } /* The base function of the minimum search algorithm. */ real find_min(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_minarr = NULL; (hipMalloc((void**)&d_minarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_minarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the minimum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_minarr, d_minarr, size); hipDeviceSynchronize(); } // grab final answer real min; (hipMemcpy(&min, d_minarr, sizeof(real), 
hipMemcpyDeviceToHost)); (hipFree(d_minarr)); return min; } /* The base function of the maximum search algorithm. */ real find_max(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); hipDeviceSynchronize(); } // grab final answer real max; (hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); (hipFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. 
*/ int find_max_int(int size, int *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(int); int *d_maxarr = NULL; (hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(int); // run kernel hipLaunchKernelGGL(( entrySearch_max_int_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_max_int_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); hipDeviceSynchronize(); } // grab final answer int max; (hipMemcpy(&max, d_maxarr, sizeof(int), hipMemcpyDeviceToHost)); (hipFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. 
*/ real find_max_mag(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_max_mag_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_max_mag_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); hipDeviceSynchronize(); } // grab final answer real max; (hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); (hipFree(d_maxarr)); return max; } /* The base function of the average algorithm. 
*/ real avg_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; int size_in = size; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); hipDeviceSynchronize(); } // grab final answer real max; (hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); (hipFree(d_maxarr)); return max / size_in; } /* The base function of the sum algorithm. 
*/ real sum_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); hipDeviceSynchronize(); } // grab final answer real max; (hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); (hipFree(d_maxarr)); return max; } /* The main test function that creates a test array of random values and calls * find_min(...). It displays both the known result as maintained through the * CPU-generated array and the GPU test result. 
*/ /*int main(int argc, char** argv) { hipDeviceProp_t deviceProp; deviceProp.major = 1; deviceProp.minor = 0; // force use of device number zero int dev = 0; (hipSetDevice(dev)); (hipGetDeviceProperties(&deviceProp, dev)); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); (hipSetDevice(dev)); // number of elements to reduce int size = pow(2, 23); printf("\nSearching %d randomly-generated elements", size); printf(" for the minimum value...\n"); // create random input data on CPU real* h_arr = (real*) malloc(size * sizeof(real)); cpumem += size * sizeof(real); real* minmax = (real*) malloc(2 * sizeof(real)); cpumem += 2 * sizeof(real); randArrGen(size, h_arr, minmax); // load host data to device int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, numBlocks, numThreads); unsigned int inbytes = size * sizeof(real); real* d_iarr = NULL; (hipMalloc((void**)&d_iarr, inbytes)); gpumem += in_bytes; (hipMemcpy(d_iarr, h_arr, inbytes, hipMemcpyHostToDevice)); // run test real gpu_result = 0; int numcount = 100; // the number of iterations to test // timing stuff hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); real elapsedTime; // run GPU test printf("\nComputing GPU result %d times...\n", numcount); hipEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_min(size, d_iarr); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); hipEventRecord(start, 0); real cpu_result = size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] < cpu_result) { cpu_result = h_arr[z]; } } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final minimum values printf("\nKnown result = %0.0f\n", 
minmax[0]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); printf("\nSearching %d randomly-generated elements", size); printf(" for the maximum value...\n"); // run GPU test printf("\nComputing GPU result %d times...\n", numcount); hipEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_max(size, d_iarr); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); hipEventRecord(start, 0); cpu_result = -size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] > cpu_result) { cpu_result = h_arr[z]; } } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final maximum values printf("\nKnown result = %0.0f\n", minmax[1]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); // clean up (hipFree(d_iarr)); free(h_arr); free(minmax); hipDeviceReset(); cutilExit(argc, argv); } */
d7fb75cf5e3cbddd108dfcfc4137b6b9ccca8461.cu
/******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ /* entrySearch * * An algorithm to find the minimum or maximum value of an array using a GPU. * This program searches an array of reals of arbitrary length and returns the * minimum or maximum value of the array. * * Adam J. Sierakowski, JHU/APL 2011 */ #include "entrySearch.h" #include <cuda.h> #define MAXTHREADS 128 #define MAXBLOCKS 64 /* A bitwise function to determine the maximum exponent x that satisfies the * inequality 2^x < n. */ int floorLog2(unsigned int n) { int pos = 0; if (n >= 1<<16) { n >>= 16; pos += 16; } if (n >= 1<< 8) { n >>= 8; pos += 8; } if (n >= 1<< 4) { n >>= 4; pos += 4; } if (n >= 1<< 2) { n >>= 2; pos += 2; } if (n >= 1<< 1) { pos += 1; } return ((n == 0) ? (-1) : pos); } /* A bitwise function to determine the minimum number n that satisfies the * inequality n > x, where n = 2^a for arbitrary a. 
*/ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* A function to determine the proper number of blocks and threads into which * the array should be split when parallelized on the GPU. */ void getNumBlocksAndThreads(int n, int &blocks, int &threads) { threads = (n < MAXTHREADS * 2) ? nextPow2((n + 1) / 2): MAXTHREADS; blocks = (n + threads * 2 - 1) / (threads * 2); } /* A function to create random input data on CPU for program testing. During * generation, the minimum value is recorded and returned for use in verifying * the GPU test result. */ void randArrGen(int size, real *arr, real* minmax) { srand(time(NULL)); for(int i=0; i<size; i++) { arr[i] = (rand() % size) - size / 2; if (arr[i] < minmax[0]) { minmax[0] = arr[i]; } if (arr[i] > minmax[1]) { minmax[1] = arr[i]; } } } /* The base function of the minimum search algorithm. */ real find_min(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_minarr = NULL; (cudaMalloc((void**)&d_minarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_min_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_minarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the minimum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_min_kernel<<<dimGrid, dimBlock, smemSize>>>(d_minarr, d_minarr, size); cudaThreadSynchronize(); } // grab final answer real min; (cudaMemcpy(&min, d_minarr, sizeof(real), cudaMemcpyDeviceToHost)); (cudaFree(d_minarr)); return min; } /* The base 
function of the maximum search algorithm. */ real find_max(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_max_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_max_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); cudaThreadSynchronize(); } // grab final answer real max; (cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); (cudaFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. 
*/ int find_max_int(int size, int *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(int); int *d_maxarr = NULL; (cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(int); // run kernel entrySearch_max_int_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_max_int_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); cudaThreadSynchronize(); } // grab final answer int max; (cudaMemcpy(&max, d_maxarr, sizeof(int), cudaMemcpyDeviceToHost)); (cudaFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. 
*/ real find_max_mag(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_max_mag_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_max_mag_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); cudaThreadSynchronize(); } // grab final answer real max; (cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); (cudaFree(d_maxarr)); return max; } /* The base function of the average algorithm. 
*/ real avg_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; int size_in = size; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); cudaThreadSynchronize(); } // grab final answer real max; (cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); (cudaFree(d_maxarr)); return max / size_in; } /* The base function of the sum algorithm. 
*/ real sum_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; (cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); cudaThreadSynchronize(); } // grab final answer real max; (cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); (cudaFree(d_maxarr)); return max; } /* The main test function that creates a test array of random values and calls * find_min(...). It displays both the known result as maintained through the * CPU-generated array and the GPU test result. 
*/ /*int main(int argc, char** argv) { cudaDeviceProp deviceProp; deviceProp.major = 1; deviceProp.minor = 0; // force use of device number zero int dev = 0; (cudaSetDevice(dev)); (cudaGetDeviceProperties(&deviceProp, dev)); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); (cudaSetDevice(dev)); // number of elements to reduce int size = pow(2, 23); printf("\nSearching %d randomly-generated elements", size); printf(" for the minimum value...\n"); // create random input data on CPU real* h_arr = (real*) malloc(size * sizeof(real)); cpumem += size * sizeof(real); real* minmax = (real*) malloc(2 * sizeof(real)); cpumem += 2 * sizeof(real); randArrGen(size, h_arr, minmax); // load host data to device int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, numBlocks, numThreads); unsigned int inbytes = size * sizeof(real); real* d_iarr = NULL; (cudaMalloc((void**)&d_iarr, inbytes)); gpumem += in_bytes; (cudaMemcpy(d_iarr, h_arr, inbytes, cudaMemcpyHostToDevice)); // run test real gpu_result = 0; int numcount = 100; // the number of iterations to test // timing stuff cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); real elapsedTime; // run GPU test printf("\nComputing GPU result %d times...\n", numcount); cudaEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_min(size, d_iarr); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); cudaEventRecord(start, 0); real cpu_result = size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] < cpu_result) { cpu_result = h_arr[z]; } } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final minimum values printf("\nKnown 
result = %0.0f\n", minmax[0]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); printf("\nSearching %d randomly-generated elements", size); printf(" for the maximum value...\n"); // run GPU test printf("\nComputing GPU result %d times...\n", numcount); cudaEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_max(size, d_iarr); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); cudaEventRecord(start, 0); cpu_result = -size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] > cpu_result) { cpu_result = h_arr[z]; } } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final maximum values printf("\nKnown result = %0.0f\n", minmax[1]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); // clean up (cudaFree(d_iarr)); free(h_arr); free(minmax); cudaThreadExit(); cutilExit(argc, argv); } */
1ecde0812a330bfc79bc1a05790a6c2ca6af8ca1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<bits/stdc++.h> #include<hiprand/hiprand.h> #include<hiprand/hiprand_kernel.h> #define PS 500 #define SUB_PS 4 #define CITIES 48 #define CROSS_GLOBALTEACHER 0 #define CROSS_LOCALTEACHER 1 #define CROSS_MEAN 2 #define CROSS_LOCALMEAN 3 using namespace std; __device__ volatile int *best_sol; __device__ volatile int best_sol_dis; __device__ volatile unsigned int var = PS; __device__ volatile unsigned int itr = PS; __global__ void setup_kernel(hiprandState_t *state) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; hiprand_init(1234, id, 0, &state[id]); } __global__ void tlboKernel(int *gpupopulation, int *gpuDistanceMat, int numberOfCities, hiprandState_t *state, int CYCLES) { __shared__ int subPop[SUB_PS][CITIES]; __shared__ int fitness[SUB_PS]; __shared__ int mean[CITIES]; __shared__ int block_teacher[CITIES]; __shared__ int block_teacher_dis; int tempA[CITIES], tempB[CITIES], tempC[CITIES]; int count[CITIES]; int vresult[CITIES]; unsigned id = threadIdx.x + blockIdx.x * blockDim.x; for(int j = 0; j < CITIES ; j++) { subPop[threadIdx.x][j] = gpupopulation[id * CITIES + j]; } //Initialize best solution if(blockIdx.x == 0 && threadIdx.x == 0) { best_sol_dis = INT_MAX; best_sol = (volatile int*)malloc(CITIES*sizeof( volatile int)); } if(threadIdx.x == 0) { block_teacher_dis = INT_MAX; } //Calculate fitness int dis = 0; for(int i = 0; i < CITIES-1 ; i++) { dis += gpuDistanceMat[subPop[threadIdx.x][i] * CITIES + subPop[threadIdx.x][i+1]]; } dis += gpuDistanceMat[subPop[threadIdx.x][CITIES-1] * CITIES + subPop[threadIdx.x][0]]; fitness[threadIdx.x] = dis; //Global Teacher atomicMin((int*)&best_sol_dis, fitness[threadIdx.x]); if(threadIdx.x == 0 && 0) { printf("best sol = %d\n", best_sol_dis); } //Subpopulation Teacher int old = atomicMin(&block_teacher_dis, fitness[threadIdx.x]); if( old != block_teacher_dis ) { for(int i = 0; i < CITIES; i++) { 
block_teacher[i] = subPop[threadIdx.x][i]; } } //PUT BARRIER HERE atomicAdd((int*)&var, -1); while( var != 0 ); if(threadIdx.x == 0 && best_sol_dis == block_teacher_dis) { printf("%d\n", best_sol_dis); for(int i = 0; i < CITIES; i++) { best_sol[i] = block_teacher[i]; } var = PS; } if(threadIdx.x == 0 ) { printf("Block = %d : Block Teacher = %d, Global teacher = %d\n",blockIdx.x, block_teacher_dis, best_sol_dis); } for(int c = 0; c < CYCLES; c++) { // TEACHER PHASE //1. Calculate Mean if(blockIdx.x == 0 && threadIdx.x == 0 ) { itr = PS; } while( itr != PS ); memset(mean, 0, CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) { atomicAdd(&mean[j], subPop[threadIdx.x][j]); } __syncthreads(); if(threadIdx.x == 0 ) { for(int j = 0; j < CITIES; j++) { mean[j] = mean[j]/SUB_PS; } //viability_op(mean); memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[mean[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[mean[i]] > 1) { tempA[i] = mean[i]; count[mean[i]] = -1; } if(count[mean[i]] == 1) { tempC[i] = mean[i]; count[mean[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) mean[i] = vresult[i]; } //2. 
Teacher Iteration int *newA; int *C; int crossleft, crossright; float randf = hiprand_uniform(&state[threadIdx.x]); int newA_dis = 0;; int *result; __syncthreads(); randf = hiprand_uniform(&state[threadIdx.x]); int crossType = ((int)(randf*100))%4; switch(crossType) { case CROSS_GLOBALTEACHER: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = (int)best_sol[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = 
crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_LOCALTEACHER: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = block_teacher[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = hiprand_uniform(&state[threadIdx.x]); crossleft = 
((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_MEAN: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = mean[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { 
newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_LOCALMEAN: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = mean[j]; crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = block_teacher[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < 
CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } } for(int i = 0; i < CITIES-1; i++) newA_dis += gpuDistanceMat[newA[i] * CITIES + newA[i+1]]; newA_dis += gpuDistanceMat[newA[CITIES-1] * CITIES + newA[0]]; if(fitness[threadIdx.x] > newA_dis) { fitness[threadIdx.x] = newA_dis; for(int i = 0; i < CITIES; i++) subPop[threadIdx.x][i] = newA[i]; } //LEARNER PHASE int randomK = -1; for(int i = 0; i < 4; i++) { if(i != threadIdx.x && fitness[i] <= fitness[threadIdx.x]) { randomK = i; break; } } if(randomK == -1) { randomK = threadIdx.x; } for(int i = 0; i < CITIES; i++) newA[i] = subPop[threadIdx.x][i]; randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) 
newA[j] = subPop[randomK][j]; int tempA[CITIES], tempB[CITIES], tempC[CITIES]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); int count[CITIES]; memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) count[newA[i]]++; for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int result2[CITIES]; int i = 0; while(i < CITIES) { result2[i] = tempA[i]; if(result2[i] == -1) { result2[i] = tempC[i]; } i++; } int j = 0, k = 0; while( k < CITIES && j < CITIES) { if(tempB[k] == -1) k++; else { if(result2[j] == -1) { result2[j] = tempB[k]; j++; k++; } else { j++; } } } for(int j = 0; j < CITIES; j++) newA[j] = result2[j]; //2.2 MUTATION randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = hiprand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = hiprand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } int CC[CITIES]; for(int j = 0; j < CITIES; j++) CC[j] = newA[j]; for(int j = 0; j < CITIES; j++) { result[j] = CC[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=CC[j]; } for(int i = 0; i < CITIES; i++) newA[i] = result[i]; newA_dis = 0; for(int i = 0; i < CITIES-1; i++) newA_dis += gpuDistanceMat[newA[i] * CITIES + newA[i+1]]; newA_dis += gpuDistanceMat[newA[CITIES-1] * CITIES + newA[0]]; if(fitness[threadIdx.x] > newA_dis) { fitness[threadIdx.x] = newA_dis; for(int i = 0; i < CITIES; i++) subPop[threadIdx.x][i] = newA[i]; } atomicMin((int*)&best_sol_dis, 
fitness[threadIdx.x]); old = atomicMin(&block_teacher_dis, fitness[threadIdx.x]); if( old != block_teacher_dis ) { for(int i = 0; i < CITIES; i++) block_teacher[i] = subPop[threadIdx.x][i]; } //PUT BARRIER HERE atomicAdd((int*)&var, -1); while( var != 0 ); if(threadIdx.x == 0 && best_sol_dis == block_teacher_dis) { for(int i = 0; i < CITIES; i++) best_sol[i] = block_teacher[i]; var = PS; } atomicAdd((int*)&itr, -1); while(itr != 0); free(result); free(newA); } if(blockIdx.x == 0 && threadIdx.x == 0) { printf("Best Solution :: %d\n", best_sol_dis); for(int i = 0; i < CITIES; i++) printf("%d ", best_sol[i]); printf("\n"); } } void createPopulation(int *population) { for(int j = 0; j < PS; j++) { int count[CITIES]; memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { int ind = rand()%CITIES; while(count[ind]) { ind = rand()%CITIES; } population[j*CITIES+i] = ind; count[ind] = 1; } } } int main(int argc, char **argv) { srand(time(NULL)); FILE *input; input = fopen(argv[1], "r"); if(input == NULL) { printf("error: failed to open input file\n"); return 0; } int CYCLES; sscanf(argv[2], "%d", &CYCLES); printf("Number of iterations of DTLBO = %d\n", CYCLES); hiprandState_t *d_state; hipMalloc(&d_state, sizeof(hiprandState_t)); int numberOfCities; vector<pair<float, float> > points; fscanf(input, "%d", &numberOfCities); printf("Number of Cities = %d\n", numberOfCities); for(int i = 0; i < numberOfCities; i++) { float x , y; fscanf(input, "%f", &x); fscanf(input, "%f", &y); points.push_back(make_pair(x, y)); } int *distanceMat ; hipHostMalloc(&distanceMat, numberOfCities*numberOfCities*sizeof(int), hipHostMallocMapped); for(int i = 0; i < numberOfCities; i++) { for(int j = 0; j < numberOfCities; j++) { float ed; float x = (points[j].first - points[i].first)*(points[j].first - points[i].first); float y = (points[j].second - points[i].second)*(points[j].second - points[i].second); ed = sqrt(x+y); distanceMat[i*numberOfCities+j] = floor(ed); } } 
printf("Generated distance matrix successfully...\n"); int noOfBlocks = ceil((float)PS/SUB_PS); int *population; hipHostMalloc(&population, PS*CITIES*sizeof(int), hipHostMallocMapped); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); createPopulation(population); printf("Generated random population...\n"); printf("Starting kernel now...Please wait\n"); hipLaunchKernelGGL(( setup_kernel), dim3(noOfBlocks),dim3(SUB_PS), 0, 0, d_state); hipLaunchKernelGGL(( tlboKernel), dim3(noOfBlocks), dim3(SUB_PS), 0, 0, population, distanceMat, numberOfCities, d_state, CYCLES); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by function to execute is: %.6f ms\n", milliseconds); return 0; }
1ecde0812a330bfc79bc1a05790a6c2ca6af8ca1.cu
#include<cuda.h> #include<bits/stdc++.h> #include<curand.h> #include<curand_kernel.h> #define PS 500 #define SUB_PS 4 #define CITIES 48 #define CROSS_GLOBALTEACHER 0 #define CROSS_LOCALTEACHER 1 #define CROSS_MEAN 2 #define CROSS_LOCALMEAN 3 using namespace std; __device__ volatile int *best_sol; __device__ volatile int best_sol_dis; __device__ volatile unsigned int var = PS; __device__ volatile unsigned int itr = PS; __global__ void setup_kernel(curandState *state) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; curand_init(1234, id, 0, &state[id]); } __global__ void tlboKernel(int *gpupopulation, int *gpuDistanceMat, int numberOfCities, curandState *state, int CYCLES) { __shared__ int subPop[SUB_PS][CITIES]; __shared__ int fitness[SUB_PS]; __shared__ int mean[CITIES]; __shared__ int block_teacher[CITIES]; __shared__ int block_teacher_dis; int tempA[CITIES], tempB[CITIES], tempC[CITIES]; int count[CITIES]; int vresult[CITIES]; unsigned id = threadIdx.x + blockIdx.x * blockDim.x; for(int j = 0; j < CITIES ; j++) { subPop[threadIdx.x][j] = gpupopulation[id * CITIES + j]; } //Initialize best solution if(blockIdx.x == 0 && threadIdx.x == 0) { best_sol_dis = INT_MAX; best_sol = (volatile int*)malloc(CITIES*sizeof( volatile int)); } if(threadIdx.x == 0) { block_teacher_dis = INT_MAX; } //Calculate fitness int dis = 0; for(int i = 0; i < CITIES-1 ; i++) { dis += gpuDistanceMat[subPop[threadIdx.x][i] * CITIES + subPop[threadIdx.x][i+1]]; } dis += gpuDistanceMat[subPop[threadIdx.x][CITIES-1] * CITIES + subPop[threadIdx.x][0]]; fitness[threadIdx.x] = dis; //Global Teacher atomicMin((int*)&best_sol_dis, fitness[threadIdx.x]); if(threadIdx.x == 0 && 0) { printf("best sol = %d\n", best_sol_dis); } //Subpopulation Teacher int old = atomicMin(&block_teacher_dis, fitness[threadIdx.x]); if( old != block_teacher_dis ) { for(int i = 0; i < CITIES; i++) { block_teacher[i] = subPop[threadIdx.x][i]; } } //PUT BARRIER HERE atomicAdd((int*)&var, -1); while( var != 0 ); 
if(threadIdx.x == 0 && best_sol_dis == block_teacher_dis) { printf("%d\n", best_sol_dis); for(int i = 0; i < CITIES; i++) { best_sol[i] = block_teacher[i]; } var = PS; } if(threadIdx.x == 0 ) { printf("Block = %d : Block Teacher = %d, Global teacher = %d\n",blockIdx.x, block_teacher_dis, best_sol_dis); } for(int c = 0; c < CYCLES; c++) { // TEACHER PHASE //1. Calculate Mean if(blockIdx.x == 0 && threadIdx.x == 0 ) { itr = PS; } while( itr != PS ); memset(mean, 0, CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) { atomicAdd(&mean[j], subPop[threadIdx.x][j]); } __syncthreads(); if(threadIdx.x == 0 ) { for(int j = 0; j < CITIES; j++) { mean[j] = mean[j]/SUB_PS; } //viability_op(mean); memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[mean[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[mean[i]] > 1) { tempA[i] = mean[i]; count[mean[i]] = -1; } if(count[mean[i]] == 1) { tempC[i] = mean[i]; count[mean[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) mean[i] = vresult[i]; } //2. 
Teacher Iteration int *newA; int *C; int crossleft, crossright; float randf = curand_uniform(&state[threadIdx.x]); int newA_dis = 0;; int *result; __syncthreads(); randf = curand_uniform(&state[threadIdx.x]); int crossType = ((int)(randf*100))%4; switch(crossType) { case CROSS_GLOBALTEACHER: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = (int)best_sol[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; 
crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_LOCALTEACHER: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = block_teacher[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = curand_uniform(&state[threadIdx.x]); crossleft = 
((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_MEAN: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = subPop[threadIdx.x][j]; crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = mean[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { 
newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } case CROSS_LOCALMEAN: { //2.1 CROSSOVER newA = (int*)malloc(CITIES*sizeof(int)); for(int j = 0; j < CITIES; j++) newA[j] = mean[j]; crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = block_teacher[j]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { count[newA[i]]++; } for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int i = 0; while(i < CITIES) { vresult[i] = tempA[i]; if(vresult[i] == -1) { vresult[i] = tempC[i]; } i++; } int j = 0; i = 0; while( i < CITIES) { 
if(tempB[i] == -1) { i++; } else { if(vresult[j] == -1) { vresult[j] = tempB[i]; j++; i++; } else { j++; } } } for(int i = 0; i < CITIES; i++) { newA[i] = vresult[i]; } C = newA; //2.2 MUTATION result = (int*)malloc(CITIES*sizeof(int)); randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j = 0; j < CITIES; j++) { result[j] = C[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=C[j]; } for(int i = 0; i < CITIES; i++) { newA[i] = result[i]; } break; } } for(int i = 0; i < CITIES-1; i++) newA_dis += gpuDistanceMat[newA[i] * CITIES + newA[i+1]]; newA_dis += gpuDistanceMat[newA[CITIES-1] * CITIES + newA[0]]; if(fitness[threadIdx.x] > newA_dis) { fitness[threadIdx.x] = newA_dis; for(int i = 0; i < CITIES; i++) subPop[threadIdx.x][i] = newA[i]; } //LEARNER PHASE int randomK = -1; for(int i = 0; i < 4; i++) { if(i != threadIdx.x && fitness[i] <= fitness[threadIdx.x]) { randomK = i; break; } } if(randomK == -1) { randomK = threadIdx.x; } for(int i = 0; i < CITIES; i++) newA[i] = subPop[threadIdx.x][i]; randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } for(int j=crossleft;j <= crossright;j++) newA[j] = 
subPop[randomK][j]; int tempA[CITIES], tempB[CITIES], tempC[CITIES]; memset(tempA, -1, CITIES*sizeof(int)); memset(tempB, -1, CITIES*sizeof(int)); memset(tempC, -1, CITIES*sizeof(int)); int count[CITIES]; memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) count[newA[i]]++; for(int i = CITIES-1; i >= 0; i--) { if(count[newA[i]] > 1) { tempA[i] = newA[i]; count[newA[i]] = -1; } if(count[newA[i]] == 1) { tempC[i] = newA[i]; count[newA[i]] = -1; } } for(int i = 0; i < CITIES; i++) { if(count[i] == 0) { tempB[i] = i; } } int result2[CITIES]; int i = 0; while(i < CITIES) { result2[i] = tempA[i]; if(result2[i] == -1) { result2[i] = tempC[i]; } i++; } int j = 0, k = 0; while( k < CITIES && j < CITIES) { if(tempB[k] == -1) k++; else { if(result2[j] == -1) { result2[j] = tempB[k]; j++; k++; } else { j++; } } } for(int j = 0; j < CITIES; j++) newA[j] = result2[j]; //2.2 MUTATION randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; if(crossleft > crossright) { int tmp = crossleft; crossleft = crossright; crossright = tmp; } while(crossleft >= crossright) { randf = curand_uniform(&state[threadIdx.x]); crossleft = ((int)(randf*100))%CITIES; randf = curand_uniform(&state[threadIdx.x]); crossright = ((int)(randf*100))%CITIES; } int CC[CITIES]; for(int j = 0; j < CITIES; j++) CC[j] = newA[j]; for(int j = 0; j < CITIES; j++) { result[j] = CC[j]; } for(int i=crossleft,j=crossright;i<=crossright&&j>=crossleft;i++,j--) { result[i]=CC[j]; } for(int i = 0; i < CITIES; i++) newA[i] = result[i]; newA_dis = 0; for(int i = 0; i < CITIES-1; i++) newA_dis += gpuDistanceMat[newA[i] * CITIES + newA[i+1]]; newA_dis += gpuDistanceMat[newA[CITIES-1] * CITIES + newA[0]]; if(fitness[threadIdx.x] > newA_dis) { fitness[threadIdx.x] = newA_dis; for(int i = 0; i < CITIES; i++) subPop[threadIdx.x][i] = newA[i]; } atomicMin((int*)&best_sol_dis, fitness[threadIdx.x]); old = 
atomicMin(&block_teacher_dis, fitness[threadIdx.x]); if( old != block_teacher_dis ) { for(int i = 0; i < CITIES; i++) block_teacher[i] = subPop[threadIdx.x][i]; } //PUT BARRIER HERE atomicAdd((int*)&var, -1); while( var != 0 ); if(threadIdx.x == 0 && best_sol_dis == block_teacher_dis) { for(int i = 0; i < CITIES; i++) best_sol[i] = block_teacher[i]; var = PS; } atomicAdd((int*)&itr, -1); while(itr != 0); free(result); free(newA); } if(blockIdx.x == 0 && threadIdx.x == 0) { printf("Best Solution :: %d\n", best_sol_dis); for(int i = 0; i < CITIES; i++) printf("%d ", best_sol[i]); printf("\n"); } } void createPopulation(int *population) { for(int j = 0; j < PS; j++) { int count[CITIES]; memset(count, 0, CITIES*sizeof(int)); for(int i = 0; i < CITIES; i++) { int ind = rand()%CITIES; while(count[ind]) { ind = rand()%CITIES; } population[j*CITIES+i] = ind; count[ind] = 1; } } } int main(int argc, char **argv) { srand(time(NULL)); FILE *input; input = fopen(argv[1], "r"); if(input == NULL) { printf("error: failed to open input file\n"); return 0; } int CYCLES; sscanf(argv[2], "%d", &CYCLES); printf("Number of iterations of DTLBO = %d\n", CYCLES); curandState *d_state; cudaMalloc(&d_state, sizeof(curandState)); int numberOfCities; vector<pair<float, float> > points; fscanf(input, "%d", &numberOfCities); printf("Number of Cities = %d\n", numberOfCities); for(int i = 0; i < numberOfCities; i++) { float x , y; fscanf(input, "%f", &x); fscanf(input, "%f", &y); points.push_back(make_pair(x, y)); } int *distanceMat ; cudaHostAlloc(&distanceMat, numberOfCities*numberOfCities*sizeof(int), cudaHostAllocMapped); for(int i = 0; i < numberOfCities; i++) { for(int j = 0; j < numberOfCities; j++) { float ed; float x = (points[j].first - points[i].first)*(points[j].first - points[i].first); float y = (points[j].second - points[i].second)*(points[j].second - points[i].second); ed = sqrt(x+y); distanceMat[i*numberOfCities+j] = floor(ed); } } printf("Generated distance matrix 
successfully...\n"); int noOfBlocks = ceil((float)PS/SUB_PS); int *population; cudaHostAlloc(&population, PS*CITIES*sizeof(int), cudaHostAllocMapped); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start,0); createPopulation(population); printf("Generated random population...\n"); printf("Starting kernel now...Please wait\n"); setup_kernel<<<noOfBlocks,SUB_PS>>>(d_state); tlboKernel<<<noOfBlocks, SUB_PS>>>(population, distanceMat, numberOfCities, d_state, CYCLES); cudaDeviceSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by function to execute is: %.6f ms\n", milliseconds); return 0; }
0c8762df0ed30375d85c3f849eb18d2f18fb0edd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_vars_kernel; int xdim0_vars_kernel_h = -1; int ydim0_vars_kernel_h = -1; __constant__ int xdim1_vars_kernel; int xdim1_vars_kernel_h = -1; int ydim1_vars_kernel_h = -1; __constant__ int xdim2_vars_kernel; int xdim2_vars_kernel_h = -1; int ydim2_vars_kernel_h = -1; __constant__ int xdim3_vars_kernel; int xdim3_vars_kernel_h = -1; int ydim3_vars_kernel_h = -1; __constant__ int xdim4_vars_kernel; int xdim4_vars_kernel_h = -1; int ydim4_vars_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #undef OPS_ACC_MD2 #undef OPS_ACC_MD3 #undef OPS_ACC_MD4 #define OPS_ACC_MD0(d, x) ((x)*3 + (d)) #define OPS_ACC_MD1(d, x) ((x)*3 + (d)) #define OPS_ACC_MD2(d, x) ((x)*3 + (d)) #define OPS_ACC_MD3(d, x) ((x)*3 + (d)) #define OPS_ACC_MD4(d, x) ((x)*3 + (d)) // user function __device__ void vars_kernel_gpu(const double *alam, const double *al, const double *gt, double *cmp, double *cf) { double anu, aaa, ga, qf, ww; for (int m = 0; m < 3; m++) { anu = alam[OPS_ACC_MD0(m, 0)]; aaa = al[OPS_ACC_MD1(m, 0)]; ga = aaa * (gt[OPS_ACC_MD2(m, 1)] - gt[OPS_ACC_MD2(m, 0)]) / (pow(aaa, 2.0) + del2); qf = sqrt(con + pow(anu, 2.0)); cmp[OPS_ACC_MD3(m, 0)] = 0.50 * qf; ww = anu + cmp[OPS_ACC_MD3(m, 0)] * ga; qf = sqrt(con + pow(ww, 2.0)); cf[OPS_ACC_MD4(m, 0)] = qf; } } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #undef OPS_ACC_MD2 #undef OPS_ACC_MD3 #undef OPS_ACC_MD4 __global__ void ops_vars_kernel(const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 3; arg1 += idx_x * 1 * 3; arg2 += idx_x * 1 * 3; arg3 += idx_x * 1 * 3; arg4 += idx_x * 1 * 3; if (idx_x < size0) { vars_kernel_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function void ops_par_loop_vars_kernel(char const *name, 
ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { // Timing double t1, t2, c1, c2; ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 5, range, 10)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(10, "vars_kernel"); OPS_kernels[10].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; if (xdim0 != xdim0_vars_kernel_h || xdim1 != xdim1_vars_kernel_h || xdim2 != xdim2_vars_kernel_h || xdim3 != xdim3_vars_kernel_h || xdim4 != xdim4_vars_kernel_h) { hipMemcpyToSymbol(xdim0_vars_kernel, &xdim0, sizeof(int)); xdim0_vars_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_vars_kernel, &xdim1, sizeof(int)); xdim1_vars_kernel_h = xdim1; hipMemcpyToSymbol(xdim2_vars_kernel, &xdim2, sizeof(int)); xdim2_vars_kernel_h = xdim2; hipMemcpyToSymbol(xdim3_vars_kernel, &xdim3, sizeof(int)); xdim3_vars_kernel_h = xdim3; 
hipMemcpyToSymbol(xdim4_vars_kernel, &xdim4, sizeof(int)); xdim4_vars_kernel_h = xdim4; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[5]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * 
args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args, 5, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[10].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_vars_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], x_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[10].time += t1 - t2; } ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[10].mpi_time += t2 - t1; OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
0c8762df0ed30375d85c3f849eb18d2f18fb0edd.cu
// // auto-generated by ops.py // __constant__ int xdim0_vars_kernel; int xdim0_vars_kernel_h = -1; int ydim0_vars_kernel_h = -1; __constant__ int xdim1_vars_kernel; int xdim1_vars_kernel_h = -1; int ydim1_vars_kernel_h = -1; __constant__ int xdim2_vars_kernel; int xdim2_vars_kernel_h = -1; int ydim2_vars_kernel_h = -1; __constant__ int xdim3_vars_kernel; int xdim3_vars_kernel_h = -1; int ydim3_vars_kernel_h = -1; __constant__ int xdim4_vars_kernel; int xdim4_vars_kernel_h = -1; int ydim4_vars_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #undef OPS_ACC_MD2 #undef OPS_ACC_MD3 #undef OPS_ACC_MD4 #define OPS_ACC_MD0(d, x) ((x)*3 + (d)) #define OPS_ACC_MD1(d, x) ((x)*3 + (d)) #define OPS_ACC_MD2(d, x) ((x)*3 + (d)) #define OPS_ACC_MD3(d, x) ((x)*3 + (d)) #define OPS_ACC_MD4(d, x) ((x)*3 + (d)) // user function __device__ void vars_kernel_gpu(const double *alam, const double *al, const double *gt, double *cmp, double *cf) { double anu, aaa, ga, qf, ww; for (int m = 0; m < 3; m++) { anu = alam[OPS_ACC_MD0(m, 0)]; aaa = al[OPS_ACC_MD1(m, 0)]; ga = aaa * (gt[OPS_ACC_MD2(m, 1)] - gt[OPS_ACC_MD2(m, 0)]) / (pow(aaa, 2.0) + del2); qf = sqrt(con + pow(anu, 2.0)); cmp[OPS_ACC_MD3(m, 0)] = 0.50 * qf; ww = anu + cmp[OPS_ACC_MD3(m, 0)] * ga; qf = sqrt(con + pow(ww, 2.0)); cf[OPS_ACC_MD4(m, 0)] = qf; } } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #undef OPS_ACC_MD2 #undef OPS_ACC_MD3 #undef OPS_ACC_MD4 __global__ void ops_vars_kernel(const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 3; arg1 += idx_x * 1 * 3; arg2 += idx_x * 1 * 3; arg3 += idx_x * 1 * 3; arg4 += idx_x * 1 * 3; if (idx_x < size0) { vars_kernel_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function void ops_par_loop_vars_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg 
arg3, ops_arg arg4) { // Timing double t1, t2, c1, c2; ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 5, range, 10)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(10, "vars_kernel"); OPS_kernels[10].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; if (xdim0 != xdim0_vars_kernel_h || xdim1 != xdim1_vars_kernel_h || xdim2 != xdim2_vars_kernel_h || xdim3 != xdim3_vars_kernel_h || xdim4 != xdim4_vars_kernel_h) { cudaMemcpyToSymbol(xdim0_vars_kernel, &xdim0, sizeof(int)); xdim0_vars_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_vars_kernel, &xdim1, sizeof(int)); xdim1_vars_kernel_h = xdim1; cudaMemcpyToSymbol(xdim2_vars_kernel, &xdim2, sizeof(int)); xdim2_vars_kernel_h = xdim2; cudaMemcpyToSymbol(xdim3_vars_kernel, &xdim3, sizeof(int)); xdim3_vars_kernel_h = xdim3; cudaMemcpyToSymbol(xdim4_vars_kernel, &xdim4, sizeof(int)); xdim4_vars_kernel_h = xdim4; } dim3 grid((x_size - 1) / 
OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[5]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 
5); ops_halo_exchanges(args, 5, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[10].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_vars_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], x_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[10].time += t1 - t2; } ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[10].mpi_time += t2 - t1; OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
25b5a76c285780e93d06b6b1151dba01d2d12f25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void scan_x(int *g_odata, int *g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int offset = 1; temp[2*thid] = g_idata[bid*bdim*2 + 2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[bid*bdim*2 + 2*thid+1]; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid*bdim*2 + 2*thid] = temp[2*thid+1]; // write results to device memory int second_ind = 2*thid+2; if(second_ind == bdim*2 ) { g_odata[bid*bdim*2 + 2*thid+1] = temp[2*thid+1] + g_idata[bid*bdim*2 + 2*thid+1]; } else { g_odata[bid*bdim*2 + 2*thid+1] = temp[2*thid+2]; } }
25b5a76c285780e93d06b6b1151dba01d2d12f25.cu
#include "includes.h" __global__ void scan_x(int *g_odata, int *g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int offset = 1; temp[2*thid] = g_idata[bid*bdim*2 + 2*thid]; // load input into shared memory temp[2*thid+1] = g_idata[bid*bdim*2 + 2*thid+1]; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid*bdim*2 + 2*thid] = temp[2*thid+1]; // write results to device memory int second_ind = 2*thid+2; if(second_ind == bdim*2 ) { g_odata[bid*bdim*2 + 2*thid+1] = temp[2*thid+1] + g_idata[bid*bdim*2 + 2*thid+1]; } else { g_odata[bid*bdim*2 + 2*thid+1] = temp[2*thid+2]; } }
fa05b7cc3b4a6e9ddc9db950bbef0715faee35cb.hip
// !!! This is a file automatically generated by hipify!!! //====================================================================== /* get_block, assign threads into a 3D block. input: nthreads, number of threads. block_limit, max dimension of the block. output: block_size, size of the block. return value, 0 for all threads are assigned to the block; 1 for part of the threads are assigned. get_grid, assign threads into a 3D grid, which consists of blocks. input: nthreads, number of threads to assign. nthreads_max_per_block, max threads per block. grid_limit, max dimension of the grid. block_limit, max dimension of the block in the grid. output: grid_size, size of the grid. block_size, size of the block. return value, 0 for all threads are assigned to the grid; 1 for part of the threads are assigned. */ //====================================================================== //#include <cstddef> //#include <cmath> #include "hip/hip_runtime.h" //#include "rocblas.h" //// head; //int get_block( const unsigned long nthreads, const dim3 block_limit, dim3 *block_size ); //int get_grid( const unsigned long nthreads, const unsigned int nthreads_max_per_block, const dim3 grid_limit, const dim3 block_limit, dim3 *grid_size, dim3 *block_size ); int get_block( const unsigned long nthreads, const dim3 block_limit, dim3 *block_size ) { unsigned long v, v_xy, v_xyz; unsigned int zlimit, ylimit, z, y, x; int block_status(1); unsigned long nthreads_max(0); dim3 max_size(0,0,0); v = (unsigned long)block_limit.x * (unsigned long)block_limit.y * (unsigned long)block_limit.z; if ( v > nthreads ) v = nthreads; zlimit = ( v - 1 ) / ( (unsigned long)block_limit.x * (unsigned long)block_limit.y ) + 1; for ( z = ( ( 1 > zlimit ) ? 1 : zlimit ); z <= block_limit.z; z++ ) { v_xy = v / z; ylimit = ( v_xy - 1 ) / (unsigned long)block_limit.x + 1; for ( y = ( ( 1 > ylimit ) ? 
1 : ylimit ); y <= block_limit.y; y++ ) { x = v_xy / y; if ( x < 1 ) continue; v_xyz = (unsigned long)x * (unsigned long)y * (unsigned long)z; if ( v_xyz > nthreads_max ) { nthreads_max = v_xyz; max_size.x = x; max_size.y = y; max_size.z = z; if ( v_xyz == v ) { block_status = 0; break; } } } if ( block_status == 0 ) break; } if ( nthreads_max == nthreads ) block_status = 0; else block_status = 1; (*block_size) = max_size; return block_status; } int get_grid( const unsigned long nthreads, const unsigned int nthreads_max_per_block, const dim3 grid_limit, const dim3 block_limit, dim3 *grid_size, dim3 *block_size ) { unsigned int nthreads_per_block_limit, nthreads_per_block; unsigned long nblocks; int grid_status(1); dim3 bdsize(0,0,0), gdsize(0,0,0); unsigned int nt; nthreads_per_block_limit = block_limit.x * block_limit.y * block_limit.z; if ( nthreads_per_block_limit > nthreads_max_per_block ) nthreads_per_block_limit = nthreads_max_per_block; for ( nt = nthreads_per_block_limit; nt >= 1; nt-- ) { get_block( nt, block_limit, &bdsize ); nthreads_per_block = bdsize.x * bdsize.y * bdsize.z; if ( ( nthreads % (unsigned long)nthreads_per_block ) != 0 ) continue; nblocks = nthreads / (unsigned long)nthreads_per_block; grid_status = get_block( nblocks, grid_limit, &gdsize ); if ( grid_status == 0 ) break; } if ( grid_status == 0 ) { (*grid_size) = gdsize; (*block_size) = bdsize; } return grid_status; } //#include <cstddef> //#include <cmath> //#include "hip/hip_runtime.h" //#include "rocblas.h" //// head; //int get_block( const size_t *nthreads, const dim3 *block_limit, dim3 *block_size ); //int get_grid( const size_t *nthreads, const size_t *nthreads_max_per_block, const dim3 *grid_limit, const dim3 *block_limit, dim3 *grid_size, dim3 *block_size ); //int get_block( const size_t *nthreads, const dim3 *block_limit, dim3 *block_size ) //{ // size_t v, x, y, z, v_xy, v_xyz; // size_t zlimit, ylimit; // // int block_status(1); // size_t nthreads_max(0); // dim3 
max_size(0,0,0); // // v = (*block_limit).x * (*block_limit).y * (*block_limit).z; // if ( v > *nthreads ) // v = *nthreads; // // zlimit = ( v - 1 ) / ( (*block_limit).x * (*block_limit).y ) + 1; // for ( z = ( ( 1 > zlimit ) ? 1 : zlimit ); z <= (*block_limit).z; z++ ) // { // v_xy = v / z; // ylimit = ( v_xy - 1 ) / (*block_limit).x + 1; // for ( y = ( ( 1 > ylimit ) ? 1 : ylimit ); y <= (*block_limit).y; y++ ) // { // x = v_xy / y; // if ( x < 1 ) continue; // // v_xyz = x * y * z; // if ( v_xyz > nthreads_max ) // { // nthreads_max = v_xyz; // max_size.x = x; // max_size.y = y; // max_size.z = z; // // if ( v_xyz == v ) // { // block_status = 0; // break; // } // } // } // if ( block_status == 0 ) // break; // } // // if ( nthreads_max == *nthreads ) // block_status = 0; // else // block_status = 1; // (*block_size) = max_size; // // return block_status; //} //int get_grid( const size_t *nthreads, const size_t *nthreads_max_per_block, const dim3 *grid_limit, const dim3 *block_limit, dim3 *grid_size, dim3 *block_size ) //{ // size_t nthreads_per_block_limit, nthreads_per_block, nblocks; // // int grid_status(1); // dim3 bdsize(0,0,0), gdsize(0,0,0); // size_t nt; // // nthreads_per_block_limit = (*block_limit).x * (*block_limit).y * (*block_limit).z; // if ( nthreads_per_block_limit > *nthreads_max_per_block ) // nthreads_per_block_limit = *nthreads_max_per_block; // // for ( nt = nthreads_per_block_limit; nt >= 1; nt-- ) // { // get_block( &nt, block_limit, &bdsize ); // nthreads_per_block = bdsize.x * bdsize.y * bdsize.z; // if ( ( *nthreads % nthreads_per_block ) != 0 ) continue; // // nblocks = *nthreads / nthreads_per_block; // grid_status = get_block( &nblocks, grid_limit, &gdsize ); // if ( grid_status == 0 ) break; // } // // if ( grid_status == 0 ) // { // (*grid_size) = gdsize; // (*block_size) = bdsize; // } // // return grid_status; //}
fa05b7cc3b4a6e9ddc9db950bbef0715faee35cb.cu
//====================================================================== /* get_block, assign threads into a 3D block. input: nthreads, number of threads. block_limit, max dimension of the block. output: block_size, size of the block. return value, 0 for all threads are assigned to the block; 1 for part of the threads are assigned. get_grid, assign threads into a 3D grid, which consists of blocks. input: nthreads, number of threads to assign. nthreads_max_per_block, max threads per block. grid_limit, max dimension of the grid. block_limit, max dimension of the block in the grid. output: grid_size, size of the grid. block_size, size of the block. return value, 0 for all threads are assigned to the grid; 1 for part of the threads are assigned. */ //====================================================================== //#include <cstddef> //#include <cmath> #include "cuda_runtime.h" //#include "cublas_v2.h" //// head; //int get_block( const unsigned long nthreads, const dim3 block_limit, dim3 *block_size ); //int get_grid( const unsigned long nthreads, const unsigned int nthreads_max_per_block, const dim3 grid_limit, const dim3 block_limit, dim3 *grid_size, dim3 *block_size ); int get_block( const unsigned long nthreads, const dim3 block_limit, dim3 *block_size ) { unsigned long v, v_xy, v_xyz; unsigned int zlimit, ylimit, z, y, x; int block_status(1); unsigned long nthreads_max(0); dim3 max_size(0,0,0); v = (unsigned long)block_limit.x * (unsigned long)block_limit.y * (unsigned long)block_limit.z; if ( v > nthreads ) v = nthreads; zlimit = ( v - 1 ) / ( (unsigned long)block_limit.x * (unsigned long)block_limit.y ) + 1; for ( z = ( ( 1 > zlimit ) ? 1 : zlimit ); z <= block_limit.z; z++ ) { v_xy = v / z; ylimit = ( v_xy - 1 ) / (unsigned long)block_limit.x + 1; for ( y = ( ( 1 > ylimit ) ? 
1 : ylimit ); y <= block_limit.y; y++ ) { x = v_xy / y; if ( x < 1 ) continue; v_xyz = (unsigned long)x * (unsigned long)y * (unsigned long)z; if ( v_xyz > nthreads_max ) { nthreads_max = v_xyz; max_size.x = x; max_size.y = y; max_size.z = z; if ( v_xyz == v ) { block_status = 0; break; } } } if ( block_status == 0 ) break; } if ( nthreads_max == nthreads ) block_status = 0; else block_status = 1; (*block_size) = max_size; return block_status; } int get_grid( const unsigned long nthreads, const unsigned int nthreads_max_per_block, const dim3 grid_limit, const dim3 block_limit, dim3 *grid_size, dim3 *block_size ) { unsigned int nthreads_per_block_limit, nthreads_per_block; unsigned long nblocks; int grid_status(1); dim3 bdsize(0,0,0), gdsize(0,0,0); unsigned int nt; nthreads_per_block_limit = block_limit.x * block_limit.y * block_limit.z; if ( nthreads_per_block_limit > nthreads_max_per_block ) nthreads_per_block_limit = nthreads_max_per_block; for ( nt = nthreads_per_block_limit; nt >= 1; nt-- ) { get_block( nt, block_limit, &bdsize ); nthreads_per_block = bdsize.x * bdsize.y * bdsize.z; if ( ( nthreads % (unsigned long)nthreads_per_block ) != 0 ) continue; nblocks = nthreads / (unsigned long)nthreads_per_block; grid_status = get_block( nblocks, grid_limit, &gdsize ); if ( grid_status == 0 ) break; } if ( grid_status == 0 ) { (*grid_size) = gdsize; (*block_size) = bdsize; } return grid_status; } //#include <cstddef> //#include <cmath> //#include "cuda_runtime.h" //#include "cublas_v2.h" //// head; //int get_block( const size_t *nthreads, const dim3 *block_limit, dim3 *block_size ); //int get_grid( const size_t *nthreads, const size_t *nthreads_max_per_block, const dim3 *grid_limit, const dim3 *block_limit, dim3 *grid_size, dim3 *block_size ); //int get_block( const size_t *nthreads, const dim3 *block_limit, dim3 *block_size ) //{ // size_t v, x, y, z, v_xy, v_xyz; // size_t zlimit, ylimit; // // int block_status(1); // size_t nthreads_max(0); // dim3 
max_size(0,0,0); // // v = (*block_limit).x * (*block_limit).y * (*block_limit).z; // if ( v > *nthreads ) // v = *nthreads; // // zlimit = ( v - 1 ) / ( (*block_limit).x * (*block_limit).y ) + 1; // for ( z = ( ( 1 > zlimit ) ? 1 : zlimit ); z <= (*block_limit).z; z++ ) // { // v_xy = v / z; // ylimit = ( v_xy - 1 ) / (*block_limit).x + 1; // for ( y = ( ( 1 > ylimit ) ? 1 : ylimit ); y <= (*block_limit).y; y++ ) // { // x = v_xy / y; // if ( x < 1 ) continue; // // v_xyz = x * y * z; // if ( v_xyz > nthreads_max ) // { // nthreads_max = v_xyz; // max_size.x = x; // max_size.y = y; // max_size.z = z; // // if ( v_xyz == v ) // { // block_status = 0; // break; // } // } // } // if ( block_status == 0 ) // break; // } // // if ( nthreads_max == *nthreads ) // block_status = 0; // else // block_status = 1; // (*block_size) = max_size; // // return block_status; //} //int get_grid( const size_t *nthreads, const size_t *nthreads_max_per_block, const dim3 *grid_limit, const dim3 *block_limit, dim3 *grid_size, dim3 *block_size ) //{ // size_t nthreads_per_block_limit, nthreads_per_block, nblocks; // // int grid_status(1); // dim3 bdsize(0,0,0), gdsize(0,0,0); // size_t nt; // // nthreads_per_block_limit = (*block_limit).x * (*block_limit).y * (*block_limit).z; // if ( nthreads_per_block_limit > *nthreads_max_per_block ) // nthreads_per_block_limit = *nthreads_max_per_block; // // for ( nt = nthreads_per_block_limit; nt >= 1; nt-- ) // { // get_block( &nt, block_limit, &bdsize ); // nthreads_per_block = bdsize.x * bdsize.y * bdsize.z; // if ( ( *nthreads % nthreads_per_block ) != 0 ) continue; // // nblocks = *nthreads / nthreads_per_block; // grid_status = get_block( &nblocks, grid_limit, &gdsize ); // if ( grid_status == 0 ) break; // } // // if ( grid_status == 0 ) // { // (*grid_size) = gdsize; // (*block_size) = bdsize; // } // // return grid_status; //}
0de66437b2d823a319a9b4e5b998e9ac49ca89cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } #include <stdlib.h> #include <vector> #include <iostream> using std::vector; using std::iostream; //////////////////////////////////////////////////////////////////////////// #if !defined(SIFTGPU_STATIC) && !defined(SIFTGPU_DLL_RUNTIME) // SIFTGPU_STATIC comes from compiler #define SIFTGPU_DLL_RUNTIME // Load at runtime if the above macro defined // comment the macro above to use static linking #endif //////////////////////////////////////////////////////////////////////////// // define REMOTE_SIFTGPU to run computation in multi-process (Or remote) mode // in order to run on a remote machine, you need to start the server manually // This mode allows you use Multi-GPUs by creating multiple servers // #define REMOTE_SIFTGPU // #define REMOTE_SERVER NULL // #define REMOTE_SERVER_PORT 7777 /////////////////////////////////////////////////////////////////////////// //#define DEBUG_SIFTGPU //define this to use the debug version in windows #ifdef _WIN32 #ifdef SIFTGPU_DLL_RUNTIME #define WIN32_LEAN_AND_MEAN #include <windows.h> #define FREE_MYLIB FreeLibrary #define GET_MYPROC GetProcAddress #else //define this to get dll import definition for win32 #define SIFTGPU_DLL #ifdef _DEBUG #pragma comment(lib, "../../lib/siftgpu_d.lib") #else #pragma comment(lib, "../../lib/siftgpu.lib") #endif #endif #else #ifdef SIFTGPU_DLL_RUNTIME #include <dlfcn.h> #define FREE_MYLIB dlclose #define GET_MYPROC dlsym #endif #endif #include "../SiftGPU/SiftGPU.h" int main() { #ifdef SIFTGPU_DLL_RUNTIME #ifdef _WIN32 #ifdef _DEBUG HMODULE hsiftgpu = LoadLibrary("siftgpu_d.dll"); #else HMODULE hsiftgpu = LoadLibrary("siftgpu.dll"); #endif #else void * hsiftgpu = 
dlopen("libsiftgpu.so", RTLD_LAZY); #endif if (hsiftgpu == NULL) return 0; #ifdef REMOTE_SIFTGPU ComboSiftGPU* (*pCreateRemoteSiftGPU) (int, char*) = NULL; pCreateRemoteSiftGPU = (ComboSiftGPU* (*) (int, char*)) GET_MYPROC(hsiftgpu, "CreateRemoteSiftGPU"); ComboSiftGPU * combo = pCreateRemoteSiftGPU(REMOTE_SERVER_PORT, REMOTE_SERVER); SiftGPU* sift = combo; SiftMatchGPU* matcher = combo; #else SiftGPU* (*pCreateNewSiftGPU)(int) = NULL; SiftMatchGPU* (*pCreateNewSiftMatchGPU)(int) = NULL; pCreateNewSiftGPU = (SiftGPU* (*) (int)) GET_MYPROC(hsiftgpu, "CreateNewSiftGPU"); pCreateNewSiftMatchGPU = (SiftMatchGPU* (*)(int)) GET_MYPROC(hsiftgpu, "CreateNewSiftMatchGPU"); SiftGPU* sift = pCreateNewSiftGPU(1); SiftMatchGPU* matcher = pCreateNewSiftMatchGPU(4096); #endif #elif defined(REMOTE_SIFTGPU) ComboSiftGPU * combo = CreateRemoteSiftGPU(REMOTE_SERVER_PORT, REMOTE_SERVER); SiftGPU* sift = combo; SiftMatchGPU* matcher = combo; #else //this will use overloaded new operators SiftGPU *sift = new SiftGPU; SiftMatchGPU *matcher = new SiftMatchGPU(4096); #endif vector<float > descriptors1(1), descriptors2(1); vector<SiftGPU::SiftKeypoint> keys1(1), keys2(1); int num1 = 0, num2 = 0; //process parameters //The following parameters are default in V340 //-m, up to 2 orientations for each feature (change to single orientation by using -m 1) //-s enable subpixel subscale (disable by using -s 0) char * argv[] = { "-fo", "-1", "-v", "1" };// //-fo -1 staring from -1 octave //-v 1 only print out # feature and overall time //-loweo add a (.5, .5) offset //-tc <num> set a soft limit to number of detected features //NEW: parameters for GPU-selection //1. CUDA. Use parameter "-cuda", "[device_id]" //2. OpenGL. 
Use "-Display", "display_name" to select monitor/GPU (XLIB/GLUT) // on windows the display name would be something like \\.\DISPLAY4 ////////////////////////////////////////////////////////////////////////////////////// //You use CUDA for nVidia graphic cards by specifying //-cuda : cuda implementation (fastest for smaller images) // CUDA-implementation allows you to create multiple instances for multiple threads // Checkout src\TestWin\MultiThreadSIFT ///////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////Two Important Parameters/////////////////////////// // First, texture reallocation happens when image size increases, and too many // reallocation may lead to allocatoin failure. You should be careful when using // siftgpu on a set of images with VARYING imag sizes. It is recommended that you // preset the allocation size to the largest width and largest height by using function // AllocationPyramid or prameter '-p' (e.g. "-p", "1024x768"). // Second, there is a parameter you may not be aware of: the allowed maximum working // dimension. All the SIFT octaves that needs a larger texture size will be skipped. // The default prameter is 2560 for the unpacked implementation and 3200 for the packed. // Those two default parameter is tuned to for 768MB of graphic memory. You should adjust // it for your own GPU memory. You can also use this to keep/skip the small featuers. // To change this, call function SetMaxDimension or use parameter "-maxd". // // NEW: by default SiftGPU will try to fit the cap of GPU memory, and reduce the working // dimension so as to not allocate too much. 
This feature can be disabled by -nomc ////////////////////////////////////////////////////////////////////////////////////// int argc = sizeof(argv) / sizeof(char*); sift->ParseParam(argc, argv); /////////////////////////////////////////////////////////////////////// //Only the following parameters can be changed after initialization (by calling ParseParam). //-dw, -ofix, -ofix-not, -fo, -unn, -maxd, -b //to change other parameters at runtime, you need to first unload the dynamically loaded libaray //reload the libarary, then create a new siftgpu instance //Create a context for computation, and SiftGPU will be initialized automatically //The same context can be used by SiftMatchGPU if (sift->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED) return 0; if (sift->RunSIFT("../data/800-1.jpg")) { //Call SaveSIFT to save result to file, the format is the same as Lowe's //sift->SaveSIFT("../data/800-1.sift"); //Note that saving ASCII format is slow //get feature count num1 = sift->GetFeatureNum(); //allocate memory keys1.resize(num1); descriptors1.resize(128 * num1); //reading back feature vectors is faster than writing files //if you dont need keys or descriptors, just put NULLs here sift->GetFeatureVector(&keys1[0], &descriptors1[0]); //this can be used to write your own sift file. } //You can have at most one OpenGL-based SiftGPU (per process). //Normally, you should just create one, and reuse on all images. 
if (sift->RunSIFT("../data/640-1.jpg")) { num2 = sift->GetFeatureNum(); keys2.resize(num2); descriptors2.resize(128 * num2); sift->GetFeatureVector(&keys2[0], &descriptors2[0]); } //Testing code to check how it works when image size varies //sift->RunSIFT("../data/256.jpg");sift->SaveSIFT("../data/256.sift.1"); //sift->RunSIFT("../data/1024.jpg"); //this will result in pyramid reallocation //sift->RunSIFT("../data/256.jpg"); sift->SaveSIFT("../data/256.sift.2"); //two sets of features for 256.jpg may have different order due to implementation //************************************************************************* /////compute descriptors for user-specified keypoints (with or without orientations) //Method1, set new keypoints for the image you've just processed with siftgpu //say vector<SiftGPU::SiftKeypoint> mykeys; //sift->RunSIFT(mykeys.size(), &mykeys[0]); //sift->RunSIFT(num2, &keys2[0], 1); sift->SaveSIFT("../data/640-1.sift.2"); //sift->RunSIFT(num2, &keys2[0], 0); sift->SaveSIFT("../data/640-1.sift.3"); //Method2, set keypoints for the next coming image //The difference of with method 1 is that method 1 skips gaussian filtering //SiftGPU::SiftKeypoint mykeys[100]; //for(int i = 0; i < 100; ++i){ // mykeys[i].s = 1.0f;mykeys[i].o = 0.0f; // mykeys[i].x = (i%10)*10.0f+50.0f; // mykeys[i].y = (i/10)*10.0f+50.0f; //} //sift->SetKeypointList(100, mykeys, 0); //sift->RunSIFT("../data/800-1.jpg"); sift->SaveSIFT("../data/800-1.sift.2"); //### for comparing with method1: //sift->RunSIFT("../data/800-1.jpg"); //sift->RunSIFT(100, mykeys, 0); sift->SaveSIFT("../data/800-1.sift.3"); //********************************************************************************* //**********************GPU SIFT MATCHING********************************* //**************************select shader language************************* //SiftMatchGPU will use the same shader lanaguage as SiftGPU by default //Before initialization, you can choose between glsl, and CUDA(if compiled). 
//matcher->SetLanguage(SiftMatchGPU::SIFTMATCH_CUDA); // +i for the (i+1)-th device //Verify current OpenGL Context and initialize the Matcher; //If you don't have an OpenGL Context, call matcher->CreateContextGL instead; matcher->VerifyContextGL(); //must call once //Set descriptors to match, the first argument must be either 0 or 1 //if you want to use more than 4096 or less than 4096 //call matcher->SetMaxSift() to change the limit before calling setdescriptor matcher->SetDescriptors(0, num1, &descriptors1[0]); //image 1 matcher->SetDescriptors(1, num2, &descriptors2[0]); //image 2 //match and get result. int(*match_buf)[2] = new int[num1][2]; //use the default thresholds. Check the declaration in SiftGPU.h int num_match = matcher->GetSiftMatch(num1, match_buf); std::cout << num_match << " sift matches were found;\n"; //enumerate all the feature matches for (int i = 0; i < num_match; ++i) { //How to get the feature matches: //SiftGPU::SiftKeypoint & key1 = keys1[match_buf[i][0]]; //SiftGPU::SiftKeypoint & key2 = keys2[match_buf[i][1]]; //key1 in the first image matches with key2 in the second image } //*****************GPU Guided SIFT MATCHING*************** //example: define a homography, and use default threshold 32 to search in a 64x64 window //float h[3][3] = {{0.8f, 0, 0}, {0, 0.8f, 0}, {0, 0, 1.0f}}; //matcher->SetFeatureLocation(0, &keys1[0]); //SetFeatureLocaiton after SetDescriptors //matcher->SetFeatureLocation(1, &keys2[0]); //num_match = matcher->GetGuidedSiftMatch(num1, match_buf, h, NULL); //std::cout << num_match << " guided sift matches were found;\n"; //if you can want to use a Fundamental matrix, check the function definition // clean up.. 
delete[] match_buf; #ifdef REMOTE_SIFTGPU delete combo; #else delete sift; delete matcher; #endif #ifdef SIFTGPU_DLL_RUNTIME FREE_MYLIB(hsiftgpu); #endif return 1; } /* const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } */
0de66437b2d823a319a9b4e5b998e9ac49ca89cf.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } #include <stdlib.h> #include <vector> #include <iostream> using std::vector; using std::iostream; //////////////////////////////////////////////////////////////////////////// #if !defined(SIFTGPU_STATIC) && !defined(SIFTGPU_DLL_RUNTIME) // SIFTGPU_STATIC comes from compiler #define SIFTGPU_DLL_RUNTIME // Load at runtime if the above macro defined // comment the macro above to use static linking #endif //////////////////////////////////////////////////////////////////////////// // define REMOTE_SIFTGPU to run computation in multi-process (Or remote) mode // in order to run on a remote machine, you need to start the server manually // This mode allows you use Multi-GPUs by creating multiple servers // #define REMOTE_SIFTGPU // #define REMOTE_SERVER NULL // #define REMOTE_SERVER_PORT 7777 /////////////////////////////////////////////////////////////////////////// //#define DEBUG_SIFTGPU //define this to use the debug version in windows #ifdef _WIN32 #ifdef SIFTGPU_DLL_RUNTIME #define WIN32_LEAN_AND_MEAN #include <windows.h> #define FREE_MYLIB FreeLibrary #define GET_MYPROC GetProcAddress #else //define this to get dll import definition for win32 #define SIFTGPU_DLL #ifdef _DEBUG #pragma comment(lib, "../../lib/siftgpu_d.lib") #else #pragma comment(lib, "../../lib/siftgpu.lib") #endif #endif #else #ifdef SIFTGPU_DLL_RUNTIME #include <dlfcn.h> #define FREE_MYLIB dlclose #define GET_MYPROC dlsym #endif #endif #include "../SiftGPU/SiftGPU.h" int main() { #ifdef SIFTGPU_DLL_RUNTIME #ifdef _WIN32 #ifdef _DEBUG HMODULE hsiftgpu = LoadLibrary("siftgpu_d.dll"); #else HMODULE hsiftgpu = LoadLibrary("siftgpu.dll"); #endif #else void * hsiftgpu = dlopen("libsiftgpu.so", RTLD_LAZY); #endif if (hsiftgpu == NULL) 
return 0; #ifdef REMOTE_SIFTGPU ComboSiftGPU* (*pCreateRemoteSiftGPU) (int, char*) = NULL; pCreateRemoteSiftGPU = (ComboSiftGPU* (*) (int, char*)) GET_MYPROC(hsiftgpu, "CreateRemoteSiftGPU"); ComboSiftGPU * combo = pCreateRemoteSiftGPU(REMOTE_SERVER_PORT, REMOTE_SERVER); SiftGPU* sift = combo; SiftMatchGPU* matcher = combo; #else SiftGPU* (*pCreateNewSiftGPU)(int) = NULL; SiftMatchGPU* (*pCreateNewSiftMatchGPU)(int) = NULL; pCreateNewSiftGPU = (SiftGPU* (*) (int)) GET_MYPROC(hsiftgpu, "CreateNewSiftGPU"); pCreateNewSiftMatchGPU = (SiftMatchGPU* (*)(int)) GET_MYPROC(hsiftgpu, "CreateNewSiftMatchGPU"); SiftGPU* sift = pCreateNewSiftGPU(1); SiftMatchGPU* matcher = pCreateNewSiftMatchGPU(4096); #endif #elif defined(REMOTE_SIFTGPU) ComboSiftGPU * combo = CreateRemoteSiftGPU(REMOTE_SERVER_PORT, REMOTE_SERVER); SiftGPU* sift = combo; SiftMatchGPU* matcher = combo; #else //this will use overloaded new operators SiftGPU *sift = new SiftGPU; SiftMatchGPU *matcher = new SiftMatchGPU(4096); #endif vector<float > descriptors1(1), descriptors2(1); vector<SiftGPU::SiftKeypoint> keys1(1), keys2(1); int num1 = 0, num2 = 0; //process parameters //The following parameters are default in V340 //-m, up to 2 orientations for each feature (change to single orientation by using -m 1) //-s enable subpixel subscale (disable by using -s 0) char * argv[] = { "-fo", "-1", "-v", "1" };// //-fo -1 staring from -1 octave //-v 1 only print out # feature and overall time //-loweo add a (.5, .5) offset //-tc <num> set a soft limit to number of detected features //NEW: parameters for GPU-selection //1. CUDA. Use parameter "-cuda", "[device_id]" //2. OpenGL. 
Use "-Display", "display_name" to select monitor/GPU (XLIB/GLUT) // on windows the display name would be something like \\.\DISPLAY4 ////////////////////////////////////////////////////////////////////////////////////// //You use CUDA for nVidia graphic cards by specifying //-cuda : cuda implementation (fastest for smaller images) // CUDA-implementation allows you to create multiple instances for multiple threads // Checkout src\TestWin\MultiThreadSIFT ///////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////Two Important Parameters/////////////////////////// // First, texture reallocation happens when image size increases, and too many // reallocation may lead to allocatoin failure. You should be careful when using // siftgpu on a set of images with VARYING imag sizes. It is recommended that you // preset the allocation size to the largest width and largest height by using function // AllocationPyramid or prameter '-p' (e.g. "-p", "1024x768"). // Second, there is a parameter you may not be aware of: the allowed maximum working // dimension. All the SIFT octaves that needs a larger texture size will be skipped. // The default prameter is 2560 for the unpacked implementation and 3200 for the packed. // Those two default parameter is tuned to for 768MB of graphic memory. You should adjust // it for your own GPU memory. You can also use this to keep/skip the small featuers. // To change this, call function SetMaxDimension or use parameter "-maxd". // // NEW: by default SiftGPU will try to fit the cap of GPU memory, and reduce the working // dimension so as to not allocate too much. 
This feature can be disabled by -nomc ////////////////////////////////////////////////////////////////////////////////////// int argc = sizeof(argv) / sizeof(char*); sift->ParseParam(argc, argv); /////////////////////////////////////////////////////////////////////// //Only the following parameters can be changed after initialization (by calling ParseParam). //-dw, -ofix, -ofix-not, -fo, -unn, -maxd, -b //to change other parameters at runtime, you need to first unload the dynamically loaded libaray //reload the libarary, then create a new siftgpu instance //Create a context for computation, and SiftGPU will be initialized automatically //The same context can be used by SiftMatchGPU if (sift->CreateContextGL() != SiftGPU::SIFTGPU_FULL_SUPPORTED) return 0; if (sift->RunSIFT("../data/800-1.jpg")) { //Call SaveSIFT to save result to file, the format is the same as Lowe's //sift->SaveSIFT("../data/800-1.sift"); //Note that saving ASCII format is slow //get feature count num1 = sift->GetFeatureNum(); //allocate memory keys1.resize(num1); descriptors1.resize(128 * num1); //reading back feature vectors is faster than writing files //if you dont need keys or descriptors, just put NULLs here sift->GetFeatureVector(&keys1[0], &descriptors1[0]); //this can be used to write your own sift file. } //You can have at most one OpenGL-based SiftGPU (per process). //Normally, you should just create one, and reuse on all images. 
if (sift->RunSIFT("../data/640-1.jpg")) { num2 = sift->GetFeatureNum(); keys2.resize(num2); descriptors2.resize(128 * num2); sift->GetFeatureVector(&keys2[0], &descriptors2[0]); } //Testing code to check how it works when image size varies //sift->RunSIFT("../data/256.jpg");sift->SaveSIFT("../data/256.sift.1"); //sift->RunSIFT("../data/1024.jpg"); //this will result in pyramid reallocation //sift->RunSIFT("../data/256.jpg"); sift->SaveSIFT("../data/256.sift.2"); //two sets of features for 256.jpg may have different order due to implementation //************************************************************************* /////compute descriptors for user-specified keypoints (with or without orientations) //Method1, set new keypoints for the image you've just processed with siftgpu //say vector<SiftGPU::SiftKeypoint> mykeys; //sift->RunSIFT(mykeys.size(), &mykeys[0]); //sift->RunSIFT(num2, &keys2[0], 1); sift->SaveSIFT("../data/640-1.sift.2"); //sift->RunSIFT(num2, &keys2[0], 0); sift->SaveSIFT("../data/640-1.sift.3"); //Method2, set keypoints for the next coming image //The difference of with method 1 is that method 1 skips gaussian filtering //SiftGPU::SiftKeypoint mykeys[100]; //for(int i = 0; i < 100; ++i){ // mykeys[i].s = 1.0f;mykeys[i].o = 0.0f; // mykeys[i].x = (i%10)*10.0f+50.0f; // mykeys[i].y = (i/10)*10.0f+50.0f; //} //sift->SetKeypointList(100, mykeys, 0); //sift->RunSIFT("../data/800-1.jpg"); sift->SaveSIFT("../data/800-1.sift.2"); //### for comparing with method1: //sift->RunSIFT("../data/800-1.jpg"); //sift->RunSIFT(100, mykeys, 0); sift->SaveSIFT("../data/800-1.sift.3"); //********************************************************************************* //**********************GPU SIFT MATCHING********************************* //**************************select shader language************************* //SiftMatchGPU will use the same shader lanaguage as SiftGPU by default //Before initialization, you can choose between glsl, and CUDA(if compiled). 
//matcher->SetLanguage(SiftMatchGPU::SIFTMATCH_CUDA); // +i for the (i+1)-th device //Verify current OpenGL Context and initialize the Matcher; //If you don't have an OpenGL Context, call matcher->CreateContextGL instead; matcher->VerifyContextGL(); //must call once //Set descriptors to match, the first argument must be either 0 or 1 //if you want to use more than 4096 or less than 4096 //call matcher->SetMaxSift() to change the limit before calling setdescriptor matcher->SetDescriptors(0, num1, &descriptors1[0]); //image 1 matcher->SetDescriptors(1, num2, &descriptors2[0]); //image 2 //match and get result. int(*match_buf)[2] = new int[num1][2]; //use the default thresholds. Check the declaration in SiftGPU.h int num_match = matcher->GetSiftMatch(num1, match_buf); std::cout << num_match << " sift matches were found;\n"; //enumerate all the feature matches for (int i = 0; i < num_match; ++i) { //How to get the feature matches: //SiftGPU::SiftKeypoint & key1 = keys1[match_buf[i][0]]; //SiftGPU::SiftKeypoint & key2 = keys2[match_buf[i][1]]; //key1 in the first image matches with key2 in the second image } //*****************GPU Guided SIFT MATCHING*************** //example: define a homography, and use default threshold 32 to search in a 64x64 window //float h[3][3] = {{0.8f, 0, 0}, {0, 0.8f, 0}, {0, 0, 1.0f}}; //matcher->SetFeatureLocation(0, &keys1[0]); //SetFeatureLocaiton after SetDescriptors //matcher->SetFeatureLocation(1, &keys2[0]); //num_match = matcher->GetGuidedSiftMatch(num1, match_buf, h, NULL); //std::cout << num_match << " guided sift matches were found;\n"; //if you can want to use a Fundamental matrix, check the function definition // clean up.. 
delete[] match_buf; #ifdef REMOTE_SIFTGPU delete combo; #else delete sift; delete matcher; #endif #ifdef SIFTGPU_DLL_RUNTIME FREE_MYLIB(hsiftgpu); #endif return 1; } /* const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } */
1711d1ff4c661450ac5e399744933c9fc0b44e63.hip
// !!! This is a file automatically generated by hipify!!! #include <kernels/gpu/prewhiten.h> #include <algorithm> #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" #include "backend/name.h" #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <device_launch_parameters.h> #include "kernels/gpu/math_gpu.h" #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template<typename T> __global__ static void mean_kernel(const int N, T *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < 1; index += blockDim.x * gridDim.x) { x[index] /= N; } } #ifdef TS_USE_CUDA_FP16 template<> __global__ void mean_kernel<half>(const int N, half *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; half half_N = half(float(N)); for (; index < 1; index += blockDim.x * gridDim.x) { x[index] /= half_N; } } #endif template<typename T> __global__ static void dev_kernel(const int N, const T *x,T* mean, T * z) { int index = blockDim.x * blockIdx.x + threadIdx.x; __shared__ T cache[CUDA_THREAD_NUM]; int cache_index = threadIdx.x; T temp = T(0.f); for (; index < N; index += blockDim.x * gridDim.x) { T sub_tmp = x[index] - *mean; temp += sub_tmp * sub_tmp; } cache[cache_index] = temp; __syncthreads(); unsigned int floor_pow = blockDim.x; if (floor_pow & (floor_pow - 1)) { while (floor_pow & (floor_pow - 1)) { floor_pow &= (floor_pow - 1); } if (cache_index >= floor_pow) { cache[cache_index - floor_pow] += cache[cache_index]; } __syncthreads(); } for (int i = floor_pow / 2; i > 0; i /= 2) { if (cache_index < i) { cache[cache_index] += cache[cache_index + i]; } __syncthreads(); } if (cache_index == 0) { z[blockIdx.x] = cache[0]; } } template<typename T> __global__ static void std_dev_kernel(const int N, T *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < 1; index += gridDim.x * blockDim.x) { x[index] = sqrt(x[index] / N); x[index] = max(x[index], T(1 / sqrt(T(N)))); x[index] = T(1) / x[index]; } } #ifdef 
TS_USE_CUDA_FP16 template<> __global__ void std_dev_kernel<half>(const int N, half *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; half half_N = half(float(N)); half one(1.f); for (; index < 1; index += gridDim.x * blockDim.x) { x[index] = hsqrt(x[index] / half_N); half temp = one / hsqrt(half_N); x[index] = x[index] > temp ? x[index] : temp; x[index] = one / x[index]; } } #endif template<typename T> __global__ static void prewhiten_kernel(const int N, T *x, T* mean,T * dev_rec) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < N; index += gridDim.x * blockDim.x) { x[index] -= *mean; x[index] *= *dev_rec; } } template<typename T> static void gpu_pre_whiten_compute_run(const Tensor &x, Tensor &out, MemoryDevice& mem_device) { auto output_shape = out.sizes(); const T *input_data = x.data<T>(); T *output_data = out.data<T>(); int count = out.count(); memcpy(output_data, out.device(), count * sizeof(T), input_data, x.device(), count * sizeof(T)); //memcpy(output_data, input_data, count * sizeof(T)); // fot batch int batch = x.size(0); count /= batch; auto batch_outout_data = output_data; int grid_size = CUDA_BLOCK(count, CUDA_THREAD_NUM); int block_size = CUDA_THREAD_NUM; Tensor buffer_tensor = Tensor(Tensor::InFlow::DEVICE, out.dtype(), {1 + 1 + block_size}); T *mean = buffer_tensor.data<T>(); T *std_dev = mean + 1; T *dev_buffer = std_dev + 1; T *at = nullptr; for (int n = 0; n < batch; ++n) { at = batch_outout_data; math<T>::sum(count, at,mean); RUN_KERNEL(mean_kernel<T>, 1, 1, count,mean); at = batch_outout_data; RUN_KERNEL(dev_kernel<T>, grid_size, block_size, count, at, mean, dev_buffer); math<T>::sum(grid_size, dev_buffer, std_dev); RUN_KERNEL(std_dev_kernel<T>, 1, 1, count, std_dev); at = batch_outout_data; RUN_KERNEL(prewhiten_kernel<T>, grid_size, block_size, count,at,mean, std_dev); batch_outout_data += count; } } void PreWhiten::prewhiten(const Tensor &x, Tensor &out) { auto dtype = out.dtype(); auto running_mem_device = 
this->running_memory_device(); switch (dtype) { #define DECLARE_TYPE_AND_RUN(DTYPE, TYPE) \ case DTYPE: { gpu_pre_whiten_compute_run<TYPE>(x, out, running_mem_device); break; } // DECLARE_TYPE_AND_RUN(INT8, int8_t); // DECLARE_TYPE_AND_RUN(UINT8, uint8_t); // DECLARE_TYPE_AND_RUN(INT16, int16_t); // DECLARE_TYPE_AND_RUN(UINT16, uint16_t); // DECLARE_TYPE_AND_RUN(INT32, int32_t); // DECLARE_TYPE_AND_RUN(UINT32, uint32_t); // DECLARE_TYPE_AND_RUN(INT64, int64_t); // DECLARE_TYPE_AND_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_TYPE_AND_RUN(FLOAT16, half); #endif DECLARE_TYPE_AND_RUN(FLOAT32, float); DECLARE_TYPE_AND_RUN(FLOAT64, double); #undef DECLARE_TYPE_AND_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(PreWhiten, GPU, name::layer::prewhiten()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(PreWhiten, GPU, name::layer::prewhiten()) #endif
1711d1ff4c661450ac5e399744933c9fc0b44e63.cu
#include <kernels/gpu/prewhiten.h> #include <algorithm> #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" #include "backend/name.h" #include <cuda_runtime.h> #include <cuda_fp16.h> #include <device_launch_parameters.h> #include "kernels/gpu/math_gpu.h" #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template<typename T> __global__ static void mean_kernel(const int N, T *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < 1; index += blockDim.x * gridDim.x) { x[index] /= N; } } #ifdef TS_USE_CUDA_FP16 template<> __global__ void mean_kernel<half>(const int N, half *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; half half_N = half(float(N)); for (; index < 1; index += blockDim.x * gridDim.x) { x[index] /= half_N; } } #endif template<typename T> __global__ static void dev_kernel(const int N, const T *x,T* mean, T * z) { int index = blockDim.x * blockIdx.x + threadIdx.x; __shared__ T cache[CUDA_THREAD_NUM]; int cache_index = threadIdx.x; T temp = T(0.f); for (; index < N; index += blockDim.x * gridDim.x) { T sub_tmp = x[index] - *mean; temp += sub_tmp * sub_tmp; } cache[cache_index] = temp; __syncthreads(); unsigned int floor_pow = blockDim.x; if (floor_pow & (floor_pow - 1)) { while (floor_pow & (floor_pow - 1)) { floor_pow &= (floor_pow - 1); } if (cache_index >= floor_pow) { cache[cache_index - floor_pow] += cache[cache_index]; } __syncthreads(); } for (int i = floor_pow / 2; i > 0; i /= 2) { if (cache_index < i) { cache[cache_index] += cache[cache_index + i]; } __syncthreads(); } if (cache_index == 0) { z[blockIdx.x] = cache[0]; } } template<typename T> __global__ static void std_dev_kernel(const int N, T *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < 1; index += gridDim.x * blockDim.x) { x[index] = sqrt(x[index] / N); x[index] = max(x[index], T(1 / sqrt(T(N)))); x[index] = T(1) / x[index]; } } #ifdef TS_USE_CUDA_FP16 template<> __global__ void 
std_dev_kernel<half>(const int N, half *x) { int index = blockDim.x * blockIdx.x + threadIdx.x; half half_N = half(float(N)); half one(1.f); for (; index < 1; index += gridDim.x * blockDim.x) { x[index] = hsqrt(x[index] / half_N); half temp = one / hsqrt(half_N); x[index] = x[index] > temp ? x[index] : temp; x[index] = one / x[index]; } } #endif template<typename T> __global__ static void prewhiten_kernel(const int N, T *x, T* mean,T * dev_rec) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < N; index += gridDim.x * blockDim.x) { x[index] -= *mean; x[index] *= *dev_rec; } } template<typename T> static void gpu_pre_whiten_compute_run(const Tensor &x, Tensor &out, MemoryDevice& mem_device) { auto output_shape = out.sizes(); const T *input_data = x.data<T>(); T *output_data = out.data<T>(); int count = out.count(); memcpy(output_data, out.device(), count * sizeof(T), input_data, x.device(), count * sizeof(T)); //memcpy(output_data, input_data, count * sizeof(T)); // fot batch int batch = x.size(0); count /= batch; auto batch_outout_data = output_data; int grid_size = CUDA_BLOCK(count, CUDA_THREAD_NUM); int block_size = CUDA_THREAD_NUM; Tensor buffer_tensor = Tensor(Tensor::InFlow::DEVICE, out.dtype(), {1 + 1 + block_size}); T *mean = buffer_tensor.data<T>(); T *std_dev = mean + 1; T *dev_buffer = std_dev + 1; T *at = nullptr; for (int n = 0; n < batch; ++n) { at = batch_outout_data; math<T>::sum(count, at,mean); RUN_KERNEL(mean_kernel<T>, 1, 1, count,mean); at = batch_outout_data; RUN_KERNEL(dev_kernel<T>, grid_size, block_size, count, at, mean, dev_buffer); math<T>::sum(grid_size, dev_buffer, std_dev); RUN_KERNEL(std_dev_kernel<T>, 1, 1, count, std_dev); at = batch_outout_data; RUN_KERNEL(prewhiten_kernel<T>, grid_size, block_size, count,at,mean, std_dev); batch_outout_data += count; } } void PreWhiten::prewhiten(const Tensor &x, Tensor &out) { auto dtype = out.dtype(); auto running_mem_device = this->running_memory_device(); switch (dtype) { 
#define DECLARE_TYPE_AND_RUN(DTYPE, TYPE) \ case DTYPE: { gpu_pre_whiten_compute_run<TYPE>(x, out, running_mem_device); break; } // DECLARE_TYPE_AND_RUN(INT8, int8_t); // DECLARE_TYPE_AND_RUN(UINT8, uint8_t); // DECLARE_TYPE_AND_RUN(INT16, int16_t); // DECLARE_TYPE_AND_RUN(UINT16, uint16_t); // DECLARE_TYPE_AND_RUN(INT32, int32_t); // DECLARE_TYPE_AND_RUN(UINT32, uint32_t); // DECLARE_TYPE_AND_RUN(INT64, int64_t); // DECLARE_TYPE_AND_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_TYPE_AND_RUN(FLOAT16, half); #endif DECLARE_TYPE_AND_RUN(FLOAT32, float); DECLARE_TYPE_AND_RUN(FLOAT64, double); #undef DECLARE_TYPE_AND_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(PreWhiten, GPU, name::layer::prewhiten()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(PreWhiten, GPU, name::layer::prewhiten()) #endif
f0b6f442cedda3432c2f9e635fbfda434cb85cf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cuda.h> #include<cuda_runtime.h> #define N 10 using namespace std; __global__ void mul(int* a_d, int n){ // printf("%d %d %d\n", blockIdx.x,blockDim.x,threadIdx.x); int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < n){ a_d[index] *= 5; } } int main(){ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int *a, *a_d; int size = N * sizeof(int); a = (int*) malloc(size); cout << "Enter " << N << " numbers: "; for(int i=0; i<N; i++){ cin>>a[i]; } hipMalloc(&a_d, size); hipMemcpy(a_d, a, size, hipMemcpyHostToDevice); hipEventRecord(start); hipLaunchKernelGGL(( mul), dim3(1),dim3(10), 0, 0, a_d,N); hipDeviceSynchronize(); hipEventRecord(stop); hipMemcpy(a, a_d, size, hipMemcpyDeviceToHost); cout<<"Matrix After Multiplying:\n"; for(int i=0; i<N; i++){ cout<<a[i]<<" "; } float millis = 0; hipEventElapsedTime(&millis, start, stop); cout << "\nElasped Time: " << millis << endl; return 0; }
f0b6f442cedda3432c2f9e635fbfda434cb85cf1.cu
#include<iostream> #include<cuda.h> #include<cuda_runtime.h> #define N 10 using namespace std; __global__ void mul(int* a_d, int n){ // printf("%d %d %d\n", blockIdx.x,blockDim.x,threadIdx.x); int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < n){ a_d[index] *= 5; } } int main(){ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int *a, *a_d; int size = N * sizeof(int); a = (int*) malloc(size); cout << "Enter " << N << " numbers: "; for(int i=0; i<N; i++){ cin>>a[i]; } cudaMalloc(&a_d, size); cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice); cudaEventRecord(start); mul<<<1,10>>>(a_d,N); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaMemcpy(a, a_d, size, cudaMemcpyDeviceToHost); cout<<"Matrix After Multiplying:\n"; for(int i=0; i<N; i++){ cout<<a[i]<<" "; } float millis = 0; cudaEventElapsedTime(&millis, start, stop); cout << "\nElasped Time: " << millis << endl; return 0; }
b20957c4a4818f2eb5a73cb01e40299bf2dda254.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <strings/utilities.cuh> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/translate.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/sort.h> #include <algorithm> namespace cudf { namespace strings { namespace detail { using translate_table = thrust::pair<char_utf8, char_utf8>; namespace { /** * @brief This is the translate functor for replacing individual characters * in each string. */ struct translate_fn { column_device_view const d_strings; rmm::device_uvector<translate_table>::iterator table_begin; rmm::device_uvector<translate_table>::iterator table_end; int32_t* d_offsets{}; char* d_chars{}; __device__ void operator()(size_type idx) { if (d_strings.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } string_view const d_str = d_strings.element<string_view>(idx); size_type bytes = d_str.size_bytes(); char* out_ptr = d_chars ? 
d_chars + d_offsets[idx] : nullptr; for (auto chr : d_str) { auto const entry = thrust::lower_bound(thrust::seq, table_begin, table_end, translate_table{chr, 0}, [](auto const& lhs, auto const& rhs) { return lhs.first < rhs.first; }); if (entry != table_end && entry->first == chr) { bytes -= bytes_in_char_utf8(chr); chr = entry->second; if (chr) // if null, skip the character bytes += bytes_in_char_utf8(chr); } if (chr && out_ptr) out_ptr += from_char_utf8(chr, out_ptr); } if (!d_chars) d_offsets[idx] = bytes; } }; } // namespace // std::unique_ptr<column> translate( strings_column_view const& strings, std::vector<std::pair<char_utf8, char_utf8>> const& chars_table, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { if (strings.is_empty()) return make_empty_strings_column(stream, mr); size_type table_size = static_cast<size_type>(chars_table.size()); // convert input table thrust::host_vector<translate_table> htable(table_size); std::transform(chars_table.begin(), chars_table.end(), htable.begin(), [](auto entry) { return translate_table{entry.first, entry.second}; }); // The size of this table is usually much less than 100 so it is was // found to be more efficient to sort on the CPU than the GPU. 
thrust::sort(htable.begin(), htable.end(), [](auto const& lhs, auto const& rhs) { return lhs.first < rhs.first; }); // copy translate table to device memory rmm::device_uvector<translate_table> table(htable.size(), stream); CUDA_TRY(hipMemcpyAsync(table.data(), htable.data(), sizeof(translate_table) * htable.size(), hipMemcpyHostToDevice, stream.value())); auto d_strings = column_device_view::create(strings.parent(), stream); auto children = make_strings_children(translate_fn{*d_strings, table.begin(), table.end()}, strings.size(), strings.null_count(), stream, mr); return make_strings_column(strings.size(), std::move(children.first), std::move(children.second), strings.null_count(), cudf::detail::copy_bitmask(strings.parent(), stream, mr), stream, mr); } } // namespace detail // external APIs std::unique_ptr<column> translate(strings_column_view const& strings, std::vector<std::pair<uint32_t, uint32_t>> const& chars_table, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::translate(strings, chars_table, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
b20957c4a4818f2eb5a73cb01e40299bf2dda254.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <strings/utilities.cuh> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/strings/translate.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/sort.h> #include <algorithm> namespace cudf { namespace strings { namespace detail { using translate_table = thrust::pair<char_utf8, char_utf8>; namespace { /** * @brief This is the translate functor for replacing individual characters * in each string. */ struct translate_fn { column_device_view const d_strings; rmm::device_uvector<translate_table>::iterator table_begin; rmm::device_uvector<translate_table>::iterator table_end; int32_t* d_offsets{}; char* d_chars{}; __device__ void operator()(size_type idx) { if (d_strings.is_null(idx)) { if (!d_chars) d_offsets[idx] = 0; return; } string_view const d_str = d_strings.element<string_view>(idx); size_type bytes = d_str.size_bytes(); char* out_ptr = d_chars ? 
d_chars + d_offsets[idx] : nullptr; for (auto chr : d_str) { auto const entry = thrust::lower_bound(thrust::seq, table_begin, table_end, translate_table{chr, 0}, [](auto const& lhs, auto const& rhs) { return lhs.first < rhs.first; }); if (entry != table_end && entry->first == chr) { bytes -= bytes_in_char_utf8(chr); chr = entry->second; if (chr) // if null, skip the character bytes += bytes_in_char_utf8(chr); } if (chr && out_ptr) out_ptr += from_char_utf8(chr, out_ptr); } if (!d_chars) d_offsets[idx] = bytes; } }; } // namespace // std::unique_ptr<column> translate( strings_column_view const& strings, std::vector<std::pair<char_utf8, char_utf8>> const& chars_table, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { if (strings.is_empty()) return make_empty_strings_column(stream, mr); size_type table_size = static_cast<size_type>(chars_table.size()); // convert input table thrust::host_vector<translate_table> htable(table_size); std::transform(chars_table.begin(), chars_table.end(), htable.begin(), [](auto entry) { return translate_table{entry.first, entry.second}; }); // The size of this table is usually much less than 100 so it is was // found to be more efficient to sort on the CPU than the GPU. 
thrust::sort(htable.begin(), htable.end(), [](auto const& lhs, auto const& rhs) { return lhs.first < rhs.first; }); // copy translate table to device memory rmm::device_uvector<translate_table> table(htable.size(), stream); CUDA_TRY(cudaMemcpyAsync(table.data(), htable.data(), sizeof(translate_table) * htable.size(), cudaMemcpyHostToDevice, stream.value())); auto d_strings = column_device_view::create(strings.parent(), stream); auto children = make_strings_children(translate_fn{*d_strings, table.begin(), table.end()}, strings.size(), strings.null_count(), stream, mr); return make_strings_column(strings.size(), std::move(children.first), std::move(children.second), strings.null_count(), cudf::detail::copy_bitmask(strings.parent(), stream, mr), stream, mr); } } // namespace detail // external APIs std::unique_ptr<column> translate(strings_column_view const& strings, std::vector<std::pair<uint32_t, uint32_t>> const& chars_table, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::translate(strings, chars_table, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
6f695b378595d9d27c682399b24885ba3842fbda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"cuda_runtime.h" #include"device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #define row 500000 #define col 10 #define rows 5000 #define cols 10 int aba[row][col]; int aa[rows][cols]; double cc[rows][rows]; double cpu_out[rows][rows]; int k; int a, b, c, d; double *d_c = 0; int *d_a = 0; int *d_i = 0; __global__ void distance(const int * dev_a, double * dev_c, const int * dev_i) { int th_num = blockIdx.x * 128 + threadIdx.x; double sum = 0; for (int k = th_num + 1; k<rows; k++) { for (int c = 0; c < cols; c++) sum += (dev_a[th_num*cols + c] - dev_i[k*cols + c])*(dev_a[th_num*cols + c] - dev_i[k*cols + c]); dev_c[th_num] = sqrt(sum); } // printf ("Sum: %d \t " , sum); } __global__ void sorting(double *dev_c, double *sort, double K) { int temp; int i; i = blockIdx.x * 128 + threadIdx.x; for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (dev_c[cols*r + c]<dev_c[cols* i + c]) { temp = dev_c[cols*r + c]; dev_c[cols*r + c] = dev_c[cols* i + c]; dev_c[cols* i + c] = temp; } } } } int main() { printf(" enter the number of K nearest neighbors :"); scanf("%d", &k); FILE *myFile; myFile = fopen("name.csv ", "w+"); if (myFile == NULL) { printf("Error Reading File\n"); exit(0); } char buffer[1024]; int i = 0, j = 0; char *record, *line; while ((line = fgets(buffer, sizeof(buffer), myFile)) != NULL) { j = 0; record = strtok(line, " ,"); while (record != NULL) { // printf("%d \t %d \t %d \n" , ( cols * i ) + j , i , j ); aba[i][j] = atoi(record); record = strtok(NULL, " ,"); j++; } i++; } fclose(myFile); int input[cols]; for (int i = 0; i <cols; i++) { int x; printf("enter input %d\n", i); scanf("%f\n", &input[i]); if (input[i] == 1001001) { for (x = 0; x < row; x++) { aba[x][i] = 1001001; } } } for (i = 0; i < cols; i++) { printf("%d", input[i]); } clock_t start; start = clock(); for (a = 0; a < 
199; a++) { for (b = 0; b <5000; b++) { c = a * 5000 + b; for (d = 0; d <cols; d++) { aa[b][d] = aba[c][d]; printf(" %d\n", aa[b][d]); } scanf("%d", &k); } hipError_t cudaStatus; cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed !"); return 1; } cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed ! Do you have a CUDAcapable GPU installed ?"); goto Error; } else printf("Working \n"); cudaStatus = hipMalloc((void **)&d_a, rows*cols * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = hipMalloc((void **)&d_i, cols * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = hipMemcpy(d_i, input, cols * sizeof(int *), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = hipMemcpy(d_a, aa, rows*cols * sizeof(int *), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = hipMalloc((void **)&d_c, rows* rows * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); double *sort = 0; cudaStatus = hipMalloc((void **)&sort, rows* rows * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed !"); goto Error; } else printf(" Success ! ! ! 
\n"); int threads = 128; while (rows%threads != 0) threads++; printf("TH: %d \n", threads); //return 0; dim3 threadsPerBlock(threads); dim3 numBlocks(rows / threadsPerBlock.x); distance << <numBlocks, threadsPerBlock >> > (d_a, d_c, d_i); sorting << <numBlocks, threadsPerBlock >> > (d_c, sort, k); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKern launch failed : %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel !\n", cudaStatus); scanf("%d", &k); goto Error; } //return cudaStatus ; cudaStatus = hipMemcpy(cc, d_c, rows*rows * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed : %s\n", hipGetErrorString(cudaStatus)); goto Error; } } printf("GPU Time Taken: %f \n", (double)(clock() - start) / CLOCKS_PER_SEC); scanf("%d", &k); for (int l = 0; l <= k; l++) { for (i = 0; i < rows; i++) { for (int j = 0; j < rows; j++) { printf("%f \t ", cc[(rows * i) + j]); } } } Error: printf (" Exiting . . \n"); scanf("%d", &k); hipFree(d_c); hipFree(d_a); hipFree(d_i); }
6f695b378595d9d27c682399b24885ba3842fbda.cu
#include"cuda_runtime.h" #include"device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #define row 500000 #define col 10 #define rows 5000 #define cols 10 int aba[row][col]; int aa[rows][cols]; double cc[rows][rows]; double cpu_out[rows][rows]; int k; int a, b, c, d; double *d_c = 0; int *d_a = 0; int *d_i = 0; __global__ void distance(const int * dev_a, double * dev_c, const int * dev_i) { int th_num = blockIdx.x * 128 + threadIdx.x; double sum = 0; for (int k = th_num + 1; k<rows; k++) { for (int c = 0; c < cols; c++) sum += (dev_a[th_num*cols + c] - dev_i[k*cols + c])*(dev_a[th_num*cols + c] - dev_i[k*cols + c]); dev_c[th_num] = sqrt(sum); } // printf ("Sum: %d \t " , sum); } __global__ void sorting(double *dev_c, double *sort, double K) { int temp; int i; i = blockIdx.x * 128 + threadIdx.x; for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (dev_c[cols*r + c]<dev_c[cols* i + c]) { temp = dev_c[cols*r + c]; dev_c[cols*r + c] = dev_c[cols* i + c]; dev_c[cols* i + c] = temp; } } } } int main() { printf(" enter the number of K nearest neighbors :"); scanf("%d", &k); FILE *myFile; myFile = fopen("name.csv ", "w+"); if (myFile == NULL) { printf("Error Reading File\n"); exit(0); } char buffer[1024]; int i = 0, j = 0; char *record, *line; while ((line = fgets(buffer, sizeof(buffer), myFile)) != NULL) { j = 0; record = strtok(line, " ,"); while (record != NULL) { // printf("%d \t %d \t %d \n" , ( cols * i ) + j , i , j ); aba[i][j] = atoi(record); record = strtok(NULL, " ,"); j++; } i++; } fclose(myFile); int input[cols]; for (int i = 0; i <cols; i++) { int x; printf("enter input %d\n", i); scanf("%f\n", &input[i]); if (input[i] == 1001001) { for (x = 0; x < row; x++) { aba[x][i] = 1001001; } } } for (i = 0; i < cols; i++) { printf("%d", input[i]); } clock_t start; start = clock(); for (a = 0; a < 199; a++) { for (b = 0; b <5000; b++) { c = a * 5000 + b; for (d = 0; d <cols; d++) { 
aa[b][d] = aba[c][d]; printf(" %d\n", aa[b][d]); } scanf("%d", &k); } cudaError_t cudaStatus; cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed !"); return 1; } cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed ! Do you have a CUDA−capable GPU installed ?"); goto Error; } else printf("Working \n"); cudaStatus = cudaMalloc((void **)&d_a, rows*cols * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = cudaMalloc((void **)&d_i, cols * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = cudaMemcpy(d_i, input, cols * sizeof(int *), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = cudaMemcpy(d_a, aa, rows*cols * sizeof(int *), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed !"); goto Error; } else printf(" Success ! ! ! \n"); cudaStatus = cudaMalloc((void **)&d_c, rows* rows * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed !"); goto Error; } else printf(" Success ! ! ! \n"); double *sort = 0; cudaStatus = cudaMalloc((void **)&sort, rows* rows * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed !"); goto Error; } else printf(" Success ! ! ! 
\n"); int threads = 128; while (rows%threads != 0) threads++; printf("TH: %d \n", threads); //return 0; dim3 threadsPerBlock(threads); dim3 numBlocks(rows / threadsPerBlock.x); distance << <numBlocks, threadsPerBlock >> > (d_a, d_c, d_i); sorting << <numBlocks, threadsPerBlock >> > (d_c, sort, k); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKern launch failed : %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel !\n", cudaStatus); scanf("%d", &k); goto Error; } //return cudaStatus ; cudaStatus = cudaMemcpy(cc, d_c, rows*rows * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed : %s\n", cudaGetErrorString(cudaStatus)); goto Error; } } printf("GPU Time Taken: %f \n", (double)(clock() - start) / CLOCKS_PER_SEC); scanf("%d", &k); for (int l = 0; l <= k; l++) { for (i = 0; i < rows; i++) { for (int j = 0; j < rows; j++) { printf("%f \t ", cc[(rows * i) + j]); } } } Error: printf (" Exiting . . \n"); scanf("%d", &k); cudaFree(d_c); cudaFree(d_a); cudaFree(d_i); }
5b38c2934aec1b058bdc09a9cc411ed174e4dcd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_index_rpad_and_clip_axis1(T* tostarts, C* tostops, int64_t target, int64_t length, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { tostarts[thread_id] = thread_id * target; tostops[thread_id] = (thread_id + 1) * target; } } }
5b38c2934aec1b058bdc09a9cc411ed174e4dcd0.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_index_rpad_and_clip_axis1(T* tostarts, C* tostops, int64_t target, int64_t length, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { tostarts[thread_id] = thread_id * target; tostops[thread_id] = (thread_id + 1) * target; } } }
a099c446832a094bf41606b02c9bd422da46cc4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <pthread.h> typedef struct { int n_vector; int n_buffer; int *d_buffer; int *d2_buffer; int id; } thread_arg_t; pthread_mutex_t the_mutex; pthread_cond_t condc, condp; int aux = 0; __global__ void kernel_create(int *data){ int ix = blockIdx.x*blockDim.x + threadIdx.x; //int i; data[ix] = data[ix]; __syncthreads(); } void call_consumer(int n_buffer, int *data); void *launch_kernel_create(void* _arg){ thread_arg_t *arg = (thread_arg_t*)_arg; int n_vector = arg->n_vector; int n_buffer = arg->n_buffer; int *d_buffer = arg->d_buffer; int *d2_buffer = arg->d2_buffer; hipStream_t stream_0; int i; int sum; int index; int cont; int *vector; int *buffer; dim3 grid, block; block.x = 1024; grid.x = (n_buffer + block.x - 1) / block.x; hipSetDevice(0); vector = (int*) malloc(sizeof(int)*n_vector); buffer = (int*) malloc(sizeof(int)*n_buffer); for(i=0;i<n_vector;i++){ vector[i] = i+1; } index = 0; cont = 0; sum = 0; while(index<n_vector){ //lockar mutex 2 pthread_mutex_lock(&the_mutex); while(aux != 0) pthread_cond_wait(&condp, &the_mutex); for(i=0;i<(n_buffer);i++){ buffer[i] = vector[i + index]; sum += buffer[i]*2; } index = index + n_buffer; hipMemcpy(d_buffer,buffer,n_buffer*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_create), dim3(grid),dim3(block), 0, 0, d_buffer); hipMemcpyPeerAsync(d2_buffer,1,d_buffer,0,n_buffer*sizeof(int),stream_0); cont++; aux = 1; //liberar mutex 2 pthread_cond_signal(&condc); pthread_mutex_unlock(&the_mutex); } printf("Soma total na CPU: %d Cont: %d\n",sum,cont-1); hipFree(d_buffer); return 0; } void *launch_kernel_modify(void* _arg){ thread_arg_t *arg = (thread_arg_t*)_arg; int n_vector = arg->n_vector; int n_buffer = arg->n_buffer; int *d2_buffer = arg->d2_buffer; int *buffer2; int acessivel = 0; int index,i,j; int sum = 0; buffer2 = (int*) malloc(sizeof(int)*n_buffer); dim3 
grid, block; block.x = 1024; grid.x = (n_buffer + block.x - 1) / block.x; hipDeviceCanAccessPeer(&acessivel,1,0); if(acessivel){ int d_sum; hipSetDevice(1); hipDeviceEnablePeerAccess(0,0); index = n_vector/n_buffer; for(i=0;i<index;i++){ //lockar mutex 1 pthread_mutex_lock(&the_mutex); while(aux == 0) pthread_cond_wait(&condc, &the_mutex); hipSetDevice(1); //kernel_modify<<<grid,block>>>(d2_buffer); call_consumer(n_buffer,d2_buffer); hipMemcpy(buffer2,d2_buffer,sizeof(int)*n_buffer,hipMemcpyDeviceToHost); for(j=0;j<n_buffer;j++){ //printf("(%d,%d) ",j,buffer2[j]); sum+=buffer2[j]; } aux = 0; //liberar mutex 1 pthread_cond_signal(&condp); pthread_mutex_unlock(&the_mutex); } hipFree(&d_sum); hipFree(d2_buffer); printf("Somatorio na GPU: %d\n",sum); } return 0; } int main(void){ int i; int gpu1 = 0; int gpu2 = 1; int n_buffer = 10; int n_vector = 1000; int *d_buffer; int *d2_buffer; pthread_t thread[2]; thread_arg_t args[2]; pthread_mutex_init(&the_mutex, NULL); pthread_cond_init(&condc, NULL); /* Initialize consumer condition variable */ pthread_cond_init(&condp, NULL); hipSetDevice(gpu1); hipMalloc(&d_buffer,sizeof(int)*n_buffer); hipSetDevice(gpu2); hipMalloc(&d2_buffer,sizeof(int)*n_buffer); hipSetDevice(gpu1); args[0].id = 0; args[0].d_buffer = d_buffer; args[0].d2_buffer = d2_buffer; args[0].n_vector = n_vector; args[0].n_buffer = n_buffer; pthread_create(&(thread[0]), NULL, launch_kernel_create, &(args[0])); hipSetDevice(gpu2); args[1].id = 0; args[1].n_buffer = n_buffer; args[1].n_vector = n_vector; args[1].d2_buffer = d2_buffer; pthread_create(&(thread[1]), NULL, launch_kernel_modify, &(args[1])); for(i=0;i<2;i++){ pthread_join(thread[i],NULL); } pthread_mutex_destroy(&the_mutex); /* Free up the_mutex */ pthread_cond_destroy(&condc); /* Free up consumer condition variable */ pthread_cond_destroy(&condp); hipSetDevice(1); hipDeviceDisablePeerAccess(0); return 0; }
a099c446832a094bf41606b02c9bd422da46cc4b.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <pthread.h> typedef struct { int n_vector; int n_buffer; int *d_buffer; int *d2_buffer; int id; } thread_arg_t; pthread_mutex_t the_mutex; pthread_cond_t condc, condp; int aux = 0; __global__ void kernel_create(int *data){ int ix = blockIdx.x*blockDim.x + threadIdx.x; //int i; data[ix] = data[ix]; __syncthreads(); } void call_consumer(int n_buffer, int *data); void *launch_kernel_create(void* _arg){ thread_arg_t *arg = (thread_arg_t*)_arg; int n_vector = arg->n_vector; int n_buffer = arg->n_buffer; int *d_buffer = arg->d_buffer; int *d2_buffer = arg->d2_buffer; cudaStream_t stream_0; int i; int sum; int index; int cont; int *vector; int *buffer; dim3 grid, block; block.x = 1024; grid.x = (n_buffer + block.x - 1) / block.x; cudaSetDevice(0); vector = (int*) malloc(sizeof(int)*n_vector); buffer = (int*) malloc(sizeof(int)*n_buffer); for(i=0;i<n_vector;i++){ vector[i] = i+1; } index = 0; cont = 0; sum = 0; while(index<n_vector){ //lockar mutex 2 pthread_mutex_lock(&the_mutex); while(aux != 0) pthread_cond_wait(&condp, &the_mutex); for(i=0;i<(n_buffer);i++){ buffer[i] = vector[i + index]; sum += buffer[i]*2; } index = index + n_buffer; cudaMemcpy(d_buffer,buffer,n_buffer*sizeof(int),cudaMemcpyHostToDevice); kernel_create<<<grid,block>>>(d_buffer); cudaMemcpyPeerAsync(d2_buffer,1,d_buffer,0,n_buffer*sizeof(int),stream_0); cont++; aux = 1; //liberar mutex 2 pthread_cond_signal(&condc); pthread_mutex_unlock(&the_mutex); } printf("Soma total na CPU: %d Cont: %d\n",sum,cont-1); cudaFree(d_buffer); return 0; } void *launch_kernel_modify(void* _arg){ thread_arg_t *arg = (thread_arg_t*)_arg; int n_vector = arg->n_vector; int n_buffer = arg->n_buffer; int *d2_buffer = arg->d2_buffer; int *buffer2; int acessivel = 0; int index,i,j; int sum = 0; buffer2 = (int*) malloc(sizeof(int)*n_buffer); dim3 grid, block; block.x = 1024; grid.x = (n_buffer + block.x - 1) / block.x; cudaDeviceCanAccessPeer(&acessivel,1,0); 
if(acessivel){ int d_sum; cudaSetDevice(1); cudaDeviceEnablePeerAccess(0,0); index = n_vector/n_buffer; for(i=0;i<index;i++){ //lockar mutex 1 pthread_mutex_lock(&the_mutex); while(aux == 0) pthread_cond_wait(&condc, &the_mutex); cudaSetDevice(1); //kernel_modify<<<grid,block>>>(d2_buffer); call_consumer(n_buffer,d2_buffer); cudaMemcpy(buffer2,d2_buffer,sizeof(int)*n_buffer,cudaMemcpyDeviceToHost); for(j=0;j<n_buffer;j++){ //printf("(%d,%d) ",j,buffer2[j]); sum+=buffer2[j]; } aux = 0; //liberar mutex 1 pthread_cond_signal(&condp); pthread_mutex_unlock(&the_mutex); } cudaFree(&d_sum); cudaFree(d2_buffer); printf("Somatorio na GPU: %d\n",sum); } return 0; } int main(void){ int i; int gpu1 = 0; int gpu2 = 1; int n_buffer = 10; int n_vector = 1000; int *d_buffer; int *d2_buffer; pthread_t thread[2]; thread_arg_t args[2]; pthread_mutex_init(&the_mutex, NULL); pthread_cond_init(&condc, NULL); /* Initialize consumer condition variable */ pthread_cond_init(&condp, NULL); cudaSetDevice(gpu1); cudaMalloc(&d_buffer,sizeof(int)*n_buffer); cudaSetDevice(gpu2); cudaMalloc(&d2_buffer,sizeof(int)*n_buffer); cudaSetDevice(gpu1); args[0].id = 0; args[0].d_buffer = d_buffer; args[0].d2_buffer = d2_buffer; args[0].n_vector = n_vector; args[0].n_buffer = n_buffer; pthread_create(&(thread[0]), NULL, launch_kernel_create, &(args[0])); cudaSetDevice(gpu2); args[1].id = 0; args[1].n_buffer = n_buffer; args[1].n_vector = n_vector; args[1].d2_buffer = d2_buffer; pthread_create(&(thread[1]), NULL, launch_kernel_modify, &(args[1])); for(i=0;i<2;i++){ pthread_join(thread[i],NULL); } pthread_mutex_destroy(&the_mutex); /* Free up the_mutex */ pthread_cond_destroy(&condc); /* Free up consumer condition variable */ pthread_cond_destroy(&condp); cudaSetDevice(1); cudaDeviceDisablePeerAccess(0); return 0; }
c5b9893145a3b82fa2ce984b06600b70427422d8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o pw_crack pw_crack.cu To Run: ./pw_crack > results.txt Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. 
*****************************************************************************/ __device__ int is_a_match(char *attempt) { char password1[] = "KD56"; char password2[] = "AC16"; char password3[] = "EF01"; char password4[] = "KL25"; char *c = attempt; char *r = attempt; char *k = attempt; char *n = attempt; char *pswd1 = password1; char *pswd2 = password2; char *pswd3 = password3; char *pswd4 = password4; while(*c == *pswd1) { if(*c== '\0') { printf("Found password: %s\n",password1); break; } c++; pswd1++; } while(*r == *pswd2) { if(*r == '\0') { printf("Found password: %s\n",password2); break; } r++; pswd2++; } while(*k == *pswd3) { if(*k == '\0') { printf("Found password: %s\n",password3); break; } k++; pswd3++; } while(*n == *pswd4) { if(*n == '\0') { printf("Found password: %s\n",password4); return 1; } n++; pswd4++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. 
*****************************************************************************/ __global__ void kernel() { char p,s; char password[5]; password[4] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstValue = i; char secondValue = j; password[0] = firstValue; password[1] = secondValue; for(p='0'; p<='9'; p++){ for(s='0'; s<='9'; s++){ password[2] = p; password[3] = s; if(is_a_match(password)) { //printf("Success"); } else { //printf("tried: %s\n", password); } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
c5b9893145a3b82fa2ce984b06600b70427422d8.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o pw_crack pw_crack.cu To Run: ./pw_crack > results.txt Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. 
*****************************************************************************/ __device__ int is_a_match(char *attempt) { char password1[] = "KD56"; char password2[] = "AC16"; char password3[] = "EF01"; char password4[] = "KL25"; char *c = attempt; char *r = attempt; char *k = attempt; char *n = attempt; char *pswd1 = password1; char *pswd2 = password2; char *pswd3 = password3; char *pswd4 = password4; while(*c == *pswd1) { if(*c== '\0') { printf("Found password: %s\n",password1); break; } c++; pswd1++; } while(*r == *pswd2) { if(*r == '\0') { printf("Found password: %s\n",password2); break; } r++; pswd2++; } while(*k == *pswd3) { if(*k == '\0') { printf("Found password: %s\n",password3); break; } k++; pswd3++; } while(*n == *pswd4) { if(*n == '\0') { printf("Found password: %s\n",password4); return 1; } n++; pswd4++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. 
*****************************************************************************/ __global__ void kernel() { char p,s; char password[5]; password[4] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstValue = i; char secondValue = j; password[0] = firstValue; password[1] = secondValue; for(p='0'; p<='9'; p++){ for(s='0'; s<='9'; s++){ password[2] = p; password[3] = s; if(is_a_match(password)) { //printf("Success"); } else { //printf("tried: %s\n", password); } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel <<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
73678bb991ca0daab8059713081d5018105943cf.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "freshman.h" __global__ void printThreadIndex(float *A,const int nx,const int ny) { int ix=threadIdx.x+blockIdx.x*blockDim.x; int iy=threadIdx.y+blockIdx.y*blockDim.y; unsigned int idx=iy*nx+ix; printf("thread_id(%d,%d) block_id(%d,%d) coordinate(%d,%d)" "global index %2d ival %2d\n",threadIdx.x,threadIdx.y, blockIdx.x,blockIdx.y,ix,iy,idx,A[idx]); } int main(int argc,char** argv) { initDevice(0); int nx=8,ny=6; int nxy=nx*ny; int nBytes=nxy*sizeof(float); //Malloc float* A_host=(float*)malloc(nBytes); initialData(A_host,nxy); printMatrix(A_host,nx,ny); //hipMalloc float *A_dev=NULL; CHECK(hipMalloc((void**)&A_dev,nBytes)); hipMemcpy(A_dev,A_host,nBytes,hipMemcpyHostToDevice); dim3 block(4,2); dim3 grid((nx-1)/block.x+1,(ny-1)/block.y+1); hipLaunchKernelGGL(( printThreadIndex), dim3(grid),dim3(block), 0, 0, A_dev,nx,ny); CHECK(hipDeviceSynchronize()); hipFree(A_dev); free(A_host); hipDeviceReset(); return 0; }
73678bb991ca0daab8059713081d5018105943cf.cu
#include <cuda_runtime.h> #include <stdio.h> #include "freshman.h" __global__ void printThreadIndex(float *A,const int nx,const int ny) { int ix=threadIdx.x+blockIdx.x*blockDim.x; int iy=threadIdx.y+blockIdx.y*blockDim.y; unsigned int idx=iy*nx+ix; printf("thread_id(%d,%d) block_id(%d,%d) coordinate(%d,%d)" "global index %2d ival %2d\n",threadIdx.x,threadIdx.y, blockIdx.x,blockIdx.y,ix,iy,idx,A[idx]); } int main(int argc,char** argv) { initDevice(0); int nx=8,ny=6; int nxy=nx*ny; int nBytes=nxy*sizeof(float); //Malloc float* A_host=(float*)malloc(nBytes); initialData(A_host,nxy); printMatrix(A_host,nx,ny); //cudaMalloc float *A_dev=NULL; CHECK(cudaMalloc((void**)&A_dev,nBytes)); cudaMemcpy(A_dev,A_host,nBytes,cudaMemcpyHostToDevice); dim3 block(4,2); dim3 grid((nx-1)/block.x+1,(ny-1)/block.y+1); printThreadIndex<<<grid,block>>>(A_dev,nx,ny); CHECK(cudaDeviceSynchronize()); cudaFree(A_dev); free(A_host); cudaDeviceReset(); return 0; }
e7be9265446eddcc1747824142d22ae792eeb463.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 15 #define F 8 #define ITERATIONS (unsigned)( 2000 ) #define ITERATIONS2 REPLACE_ITERATIONS #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 128 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * 
blockIdx.x + threadIdx.x; //Do Some Computation //int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //unsigned j=0, k=0; int m_sum=N; // m_sum = A[tid*F]; for (unsigned j=0; j<ITERATIONS2; j++){ for(unsigned k=0; k<ITERATIONS; ++k){ m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]; } m_sum+=j; } C[tid]=m_sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); //int N = LINE_SIZE*SETS*ASSOC; unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F)); size_t size = N * sizeof(int); // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); //checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( 
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
e7be9265446eddcc1747824142d22ae792eeb463.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 15 #define F 8 #define ITERATIONS (unsigned)( 2000 ) #define ITERATIONS2 REPLACE_ITERATIONS #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 128 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation //int 
size = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //unsigned j=0, k=0; int m_sum=N; // m_sum = A[tid*F]; for (unsigned j=0; j<ITERATIONS2; j++){ for(unsigned k=0; k<ITERATIONS; ++k){ m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]; } m_sum+=j; } C[tid]=m_sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); //int N = LINE_SIZE*SETS*ASSOC; unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F)); size_t size = N * sizeof(int); // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); //checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; 
} void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
c49963865e07a3379fad23ac2d07fe3819a36587.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2015-2019, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <hip/hip_runtime.h> #include <forge.h> #define USE_FORGE_CUDA_COPY_HELPERS #include <fg/compute_copy.h> #define PI 3.14159265359 const unsigned DIMX = 640; const unsigned DIMY = 480; const float MINIMUM = 1.0f; const float MAXIMUM = 20.f; const float STEP = 2.0f; const float NELEMS = (MAXIMUM - MINIMUM + 1) / STEP; const unsigned DPOINTS[] = {5, 5, 5, 15, 15, 5, 15, 15}; void generatePoints(float* points, float* dirs); inline int divup(int a, int b) { return (a + b - 1) / b; } int main(void) { unsigned* dpoints; float* points; float* dirs; /* * First Forge call should be a window creation call * so that necessary OpenGL context is created for any * other forge::* object to be created successfully */ forge::Window wnd(DIMX, DIMY, "Vector Field Demo"); wnd.makeCurrent(); forge::Chart chart(FG_CHART_2D); chart.setAxesLimits(MINIMUM - 1.0f, MAXIMUM, MINIMUM - 1.0f, MAXIMUM); chart.setAxesTitles("x-axis", "y-axis"); forge::Plot divPoints = chart.plot(4, forge::u32, FG_PLOT_SCATTER, FG_MARKER_CIRCLE); divPoints.setColor(0.9f, 0.9f, 0.0f, 1.f); divPoints.setLegend("Convergence Points"); divPoints.setMarkerSize(24); size_t npoints = (size_t)(NELEMS * NELEMS); forge::VectorField field = chart.vectorField((unsigned)(npoints), forge::f32); field.setColor(0.f, 0.6f, 0.3f, 1.f); FORGE_CUDA_CHECK(hipMalloc((void**)&dpoints, 8 * sizeof(unsigned))); FORGE_CUDA_CHECK(hipMalloc((void**)&points, 2 * npoints * sizeof(float))); FORGE_CUDA_CHECK(hipMalloc((void**)&dirs, 2 * npoints * sizeof(float))); GfxHandle* handles[3]; createGLBuffer(&handles[0], divPoints.vertices(), FORGE_VERTEX_BUFFER); 
createGLBuffer(&handles[1], field.vertices(), FORGE_VERTEX_BUFFER); createGLBuffer(&handles[2], field.directions(), FORGE_VERTEX_BUFFER); FORGE_CUDA_CHECK(hipMemcpy(dpoints, DPOINTS, 8 * sizeof(unsigned), hipMemcpyHostToDevice)); generatePoints(points, dirs); copyToGLBuffer(handles[0], (ComputeResourceHandle)dpoints, divPoints.verticesSize()); copyToGLBuffer(handles[1], (ComputeResourceHandle)points, field.verticesSize()); copyToGLBuffer(handles[2], (ComputeResourceHandle)dirs, field.directionsSize()); do { wnd.draw(chart); } while (!wnd.close()); // destroy GL-CUDA interop buffers releaseGLBuffer(handles[0]); releaseGLBuffer(handles[1]); releaseGLBuffer(handles[2]); // destroy CUDA handles FORGE_CUDA_CHECK(hipFree(dpoints)); FORGE_CUDA_CHECK(hipFree(points)); FORGE_CUDA_CHECK(hipFree(dirs)); return 0; } __global__ void pointGenKernel(float* points, float* dirs, int nelems, float minimum, float step) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < nelems && j < nelems) { int id = i + j * nelems; float x = minimum + i * step; float y = minimum + j * step; points[2 * id + 0] = x; points[2 * id + 1] = y; dirs[2 * id + 0] = sinf(2.0f * PI * x / 10.f); dirs[2 * id + 1] = sinf(2.0f * PI * y / 10.f); } } void generatePoints(float* points, float* dirs) { static dim3 threads(8, 8); dim3 blocks(divup((int)(NELEMS), threads.x), divup((int)(NELEMS), threads.y)); // clang-format off hipLaunchKernelGGL(( pointGenKernel), dim3(blocks), dim3(threads), 0, 0, points, dirs, (int)(NELEMS), MINIMUM, STEP); // clang-format on }
c49963865e07a3379fad23ac2d07fe3819a36587.cu
/******************************************************* * Copyright (c) 2015-2019, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <cuda_runtime.h> #include <forge.h> #define USE_FORGE_CUDA_COPY_HELPERS #include <fg/compute_copy.h> #define PI 3.14159265359 const unsigned DIMX = 640; const unsigned DIMY = 480; const float MINIMUM = 1.0f; const float MAXIMUM = 20.f; const float STEP = 2.0f; const float NELEMS = (MAXIMUM - MINIMUM + 1) / STEP; const unsigned DPOINTS[] = {5, 5, 5, 15, 15, 5, 15, 15}; void generatePoints(float* points, float* dirs); inline int divup(int a, int b) { return (a + b - 1) / b; } int main(void) { unsigned* dpoints; float* points; float* dirs; /* * First Forge call should be a window creation call * so that necessary OpenGL context is created for any * other forge::* object to be created successfully */ forge::Window wnd(DIMX, DIMY, "Vector Field Demo"); wnd.makeCurrent(); forge::Chart chart(FG_CHART_2D); chart.setAxesLimits(MINIMUM - 1.0f, MAXIMUM, MINIMUM - 1.0f, MAXIMUM); chart.setAxesTitles("x-axis", "y-axis"); forge::Plot divPoints = chart.plot(4, forge::u32, FG_PLOT_SCATTER, FG_MARKER_CIRCLE); divPoints.setColor(0.9f, 0.9f, 0.0f, 1.f); divPoints.setLegend("Convergence Points"); divPoints.setMarkerSize(24); size_t npoints = (size_t)(NELEMS * NELEMS); forge::VectorField field = chart.vectorField((unsigned)(npoints), forge::f32); field.setColor(0.f, 0.6f, 0.3f, 1.f); FORGE_CUDA_CHECK(cudaMalloc((void**)&dpoints, 8 * sizeof(unsigned))); FORGE_CUDA_CHECK(cudaMalloc((void**)&points, 2 * npoints * sizeof(float))); FORGE_CUDA_CHECK(cudaMalloc((void**)&dirs, 2 * npoints * sizeof(float))); GfxHandle* handles[3]; createGLBuffer(&handles[0], divPoints.vertices(), FORGE_VERTEX_BUFFER); createGLBuffer(&handles[1], field.vertices(), 
FORGE_VERTEX_BUFFER); createGLBuffer(&handles[2], field.directions(), FORGE_VERTEX_BUFFER); FORGE_CUDA_CHECK(cudaMemcpy(dpoints, DPOINTS, 8 * sizeof(unsigned), cudaMemcpyHostToDevice)); generatePoints(points, dirs); copyToGLBuffer(handles[0], (ComputeResourceHandle)dpoints, divPoints.verticesSize()); copyToGLBuffer(handles[1], (ComputeResourceHandle)points, field.verticesSize()); copyToGLBuffer(handles[2], (ComputeResourceHandle)dirs, field.directionsSize()); do { wnd.draw(chart); } while (!wnd.close()); // destroy GL-CUDA interop buffers releaseGLBuffer(handles[0]); releaseGLBuffer(handles[1]); releaseGLBuffer(handles[2]); // destroy CUDA handles FORGE_CUDA_CHECK(cudaFree(dpoints)); FORGE_CUDA_CHECK(cudaFree(points)); FORGE_CUDA_CHECK(cudaFree(dirs)); return 0; } __global__ void pointGenKernel(float* points, float* dirs, int nelems, float minimum, float step) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < nelems && j < nelems) { int id = i + j * nelems; float x = minimum + i * step; float y = minimum + j * step; points[2 * id + 0] = x; points[2 * id + 1] = y; dirs[2 * id + 0] = sinf(2.0f * PI * x / 10.f); dirs[2 * id + 1] = sinf(2.0f * PI * y / 10.f); } } void generatePoints(float* points, float* dirs) { static dim3 threads(8, 8); dim3 blocks(divup((int)(NELEMS), threads.x), divup((int)(NELEMS), threads.y)); // clang-format off pointGenKernel<<<blocks, threads>>>(points, dirs, (int)(NELEMS), MINIMUM, STEP); // clang-format on }
d3e8e777f65ccaea53af1eae3ab0da537b384393.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <torch/extension.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #define BLOCK_SIZE 256 using std::min; using std::max; #include "box_convolution.h" // for `enum class Parameter` namespace gpu { // TODO use constant memory when possible // namespace constant { // __constant__ float xMinFrac[1536], xMaxFrac[1536]; // __constant__ float yMinFrac[1536], yMaxFrac[1536]; // __constant__ int xMinInt[1536], xMaxInt[1536]; // __constant__ int yMinInt[1536], yMaxInt[1536]; // __constant__ float area[1536]; // } template <typename T, size_t N> using CudaAcsr = const at::PackedTensorAccessor<T, N, at::RestrictPtrTraits, int32_t>; // overload for "truncated"/"rounded" mode template <bool normalize, typename scalar_t> __global__ void boxConvUpdateOutputKernel( CudaAcsr<scalar_t,3> inputInt, CudaAcsr<scalar_t,5> output, const int32_t * __restrict__ xMinInt, const int32_t * __restrict__ xMaxInt, const int32_t * __restrict__ yMinInt, const int32_t * __restrict__ yMaxInt, const scalar_t * __restrict__ area) { // `output` size: `batch_size x in_planes x num_filters x h x w` const int32_t y = blockDim.x * blockIdx.x + threadIdx.x; const int32_t x = blockDim.y * blockIdx.y + threadIdx.y; const int32_t inPlaneIdx = blockIdx.z / output.size(2); const int32_t paramIdx = blockIdx.z % (output.size(1) * output.size(2)); const int32_t h = output.size(3); const int32_t w = output.size(4); const auto inputIntPlane = inputInt[inPlaneIdx]; if (x < h and y < w) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int32_t t = max(0, min(x+xMinInt[paramIdx], h)); const int32_t b = max(0, min(x+xMaxInt[paramIdx], h)); const int32_t l = max(0, min(y+yMinInt[paramIdx], w)); const int32_t r = max(0, min(y+yMaxInt[paramIdx], w)); scalar_t outValue = 0; outValue += inputIntPlane[b][r]; outValue -= inputIntPlane[t][r]; outValue -= inputIntPlane[b][l]; outValue += inputIntPlane[t][l]; // TODO error: expression must be a modifiable lvalue output.data()[(blockIdx.z * h + x) * w + y] = outValue * (normalize ? area[paramIdx] : static_cast<scalar_t>(1)); } } // overload for "exact" mode template <bool normalize, typename scalar_t> __global__ void boxConvUpdateOutputKernel( CudaAcsr<scalar_t,3> inputInt, CudaAcsr<scalar_t,5> output, const int32_t * __restrict__ xMinInt, const int32_t * __restrict__ xMaxInt, const int32_t * __restrict__ yMinInt, const int32_t * __restrict__ yMaxInt, const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac, const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac, const scalar_t * __restrict__ area) { const int32_t y = blockDim.x * blockIdx.x + threadIdx.x; const int32_t x = blockDim.y * blockIdx.y + threadIdx.y; const int32_t inPlaneIdx = blockIdx.z / output.size(2); const int32_t paramIdx = blockIdx.z % (output.size(1) * output.size(2)); const int32_t h = output.size(3); const int32_t w = output.size(4); const auto inputIntPlane = inputInt[inPlaneIdx]; if (x < h and y < w) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = xMinInt[paramIdx]; const int xMaxCurr = xMaxInt[paramIdx]; const int yMinCurr = yMinInt[paramIdx]; const int yMaxCurr = yMaxInt[paramIdx]; const scalar_t xMinCurrFrac = xMinFrac[paramIdx]; const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx]; const scalar_t yMinCurrFrac = yMinFrac[paramIdx]; const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx]; const int32_t t = max(0, min(x+xMinCurr, h)); const int32_t b = max(0, min(x+xMaxCurr, h)); const int32_t l = max(0, min(y+yMinCurr, w)); const int32_t r = max(0, min(y+yMaxCurr, w)); const int32_t bAdv = max(0, min(x+xMaxCurr+1, h)); const int32_t rAdv = max(0, min(y+yMaxCurr+1, w)); const int32_t tAdv = max(0, min(x+xMinCurr-1, h)); const int32_t lAdv = max(0, min(y+yMinCurr-1, w)); scalar_t outValue; // -- main area outValue = inputIntPlane[b][r] - inputIntPlane[t][r] - inputIntPlane[b][l] + inputIntPlane[t][l]; // -- xMax border outValue += ( inputIntPlane[bAdv][r] - inputIntPlane[b ][r] - inputIntPlane[bAdv][l] + inputIntPlane[b ][l]) * xMaxCurrFrac; // -- yMax border outValue += ( inputIntPlane[b][rAdv] - inputIntPlane[b][r ] - inputIntPlane[t][rAdv] + inputIntPlane[t][r ]) * yMaxCurrFrac; // -- xMin border outValue += ( inputIntPlane[t ][r] - inputIntPlane[tAdv][r] - inputIntPlane[t ][l] + inputIntPlane[tAdv][l]) * xMinCurrFrac; // -- yMin border outValue += ( inputIntPlane[b][l ] - inputIntPlane[b][lAdv] - inputIntPlane[t][l ] + inputIntPlane[t][lAdv]) * yMinCurrFrac; // -- corner pixels // Note: before, I used plain `input` to access corner values // with lower memory access overhead. Moved to `input_integrated` // to get rid of an extra input to this function. 
if (not ((x+xMaxCurr >= h) | (y+yMaxCurr >= w) | (x+xMaxCurr < 0) | (y+yMaxCurr < 0))) { outValue += xMaxCurrFrac * yMaxCurrFrac * ( inputIntPlane[b+1][r+1] - inputIntPlane[b ][r+1] - inputIntPlane[b+1][r ] + inputIntPlane[b ][r ]); } if (not ((x+xMinCurr > h) | (y+yMaxCurr >= w) | (x+xMinCurr <= 0) | (y+yMaxCurr < 0))) { outValue += xMinCurrFrac * yMaxCurrFrac * ( inputIntPlane[t ][r+1] - inputIntPlane[t-1][r+1] - inputIntPlane[t ][r ] + inputIntPlane[t-1][r ]); } if (not ((x+xMaxCurr >= h) | (y+yMinCurr > w) | (x+xMaxCurr < 0) | (y+yMinCurr <= 0))) { outValue += xMaxCurrFrac * yMinCurrFrac * ( inputIntPlane[b+1][l ] - inputIntPlane[b ][l ] - inputIntPlane[b+1][l-1] + inputIntPlane[b ][l-1]); } if (not ((x+xMinCurr > h) | (y+yMinCurr > w) | (x+xMinCurr <= 0) | (y+yMinCurr <= 0))) { outValue += xMinCurrFrac * yMinCurrFrac * ( inputIntPlane[t ][l ] - inputIntPlane[t-1][l ] - inputIntPlane[t ][l-1] + inputIntPlane[t-1][l-1]); } // TODO error: expression must be a modifiable lvalue output.data()[(blockIdx.z * h + x) * w + y] = outValue * (normalize ? 
area[paramIdx] : static_cast<scalar_t>(1)); } } // TODO put split params and area into constant memory template <bool normalize, bool exact> void boxConvUpdateOutput( at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt , at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac, at::Tensor & area, at::Tensor & input_integrated, at::Tensor & output) { const int h = output.size(-2); const int w = output.size(-1); const int totalOutputChannels = output.numel() / (h * w); const dim3 blockSize(32, 32, 1); const dim3 gridSize( (w + blockSize.x - 1) / blockSize.x, (h + blockSize.y - 1) / blockSize.y, (totalOutputChannels + blockSize.z - 1) / blockSize.z); AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.type(), "gpu::boxConvUpdateOutput", ([&] { auto inputIntFlattened = input_integrated.view({-1, h+1, w+1}); auto inputIntAcsr = inputIntFlattened.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(); auto outputAcsr = output.packed_accessor<scalar_t, 5, at::RestrictPtrTraits, int32_t>(); if (exact) { hipLaunchKernelGGL(( boxConvUpdateOutputKernel <normalize>) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), inputIntAcsr, outputAcsr, xMinInt.data<int32_t>(), xMaxInt.data<int32_t>(), yMinInt.data<int32_t>(), yMaxInt.data<int32_t>(), xMinFrac.data<scalar_t>(), xMaxFrac.data<scalar_t>(), yMinFrac.data<scalar_t>(), yMaxFrac.data<scalar_t>(), normalize ? area.data<scalar_t>() : nullptr); } else { hipLaunchKernelGGL(( boxConvUpdateOutputKernel <normalize>) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), inputIntAcsr, outputAcsr, xMinInt.data<int32_t>(), xMaxInt.data<int32_t>(), yMinInt.data<int32_t>(), yMaxInt.data<int32_t>(), normalize ? 
area.data<scalar_t>() : nullptr); } THCudaCheck(hipGetLastError()); })); } // explicitly instantiate template void boxConvUpdateOutput<true, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<false, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<true, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<false, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); }
d3e8e777f65ccaea53af1eae3ab0da537b384393.cu
#include <torch/extension.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #define BLOCK_SIZE 256 using std::min; using std::max; #include "box_convolution.h" // for `enum class Parameter` namespace gpu { // TODO use constant memory when possible // namespace constant { // __constant__ float xMinFrac[1536], xMaxFrac[1536]; // __constant__ float yMinFrac[1536], yMaxFrac[1536]; // __constant__ int xMinInt[1536], xMaxInt[1536]; // __constant__ int yMinInt[1536], yMaxInt[1536]; // __constant__ float area[1536]; // } template <typename T, size_t N> using CudaAcsr = const at::PackedTensorAccessor<T, N, at::RestrictPtrTraits, int32_t>; // overload for "truncated"/"rounded" mode template <bool normalize, typename scalar_t> __global__ void boxConvUpdateOutputKernel( CudaAcsr<scalar_t,3> inputInt, CudaAcsr<scalar_t,5> output, const int32_t * __restrict__ xMinInt, const int32_t * __restrict__ xMaxInt, const int32_t * __restrict__ yMinInt, const int32_t * __restrict__ yMaxInt, const scalar_t * __restrict__ area) { // `output` size: `batch_size x in_planes x num_filters x h x w` const int32_t y = blockDim.x * blockIdx.x + threadIdx.x; const int32_t x = blockDim.y * blockIdx.y + threadIdx.y; const int32_t inPlaneIdx = blockIdx.z / output.size(2); const int32_t paramIdx = blockIdx.z % (output.size(1) * output.size(2)); const int32_t h = output.size(3); const int32_t w = output.size(4); const auto inputIntPlane = inputInt[inPlaneIdx]; if (x < h and y < w) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int32_t t = max(0, min(x+xMinInt[paramIdx], h)); const int32_t b = max(0, min(x+xMaxInt[paramIdx], h)); const int32_t l = max(0, min(y+yMinInt[paramIdx], w)); const int32_t r = max(0, min(y+yMaxInt[paramIdx], w)); scalar_t outValue = 0; outValue += inputIntPlane[b][r]; outValue -= inputIntPlane[t][r]; outValue -= inputIntPlane[b][l]; outValue += inputIntPlane[t][l]; // TODO error: expression must be a modifiable lvalue output.data()[(blockIdx.z * h + x) * w + y] = outValue * (normalize ? area[paramIdx] : static_cast<scalar_t>(1)); } } // overload for "exact" mode template <bool normalize, typename scalar_t> __global__ void boxConvUpdateOutputKernel( CudaAcsr<scalar_t,3> inputInt, CudaAcsr<scalar_t,5> output, const int32_t * __restrict__ xMinInt, const int32_t * __restrict__ xMaxInt, const int32_t * __restrict__ yMinInt, const int32_t * __restrict__ yMaxInt, const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac, const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac, const scalar_t * __restrict__ area) { const int32_t y = blockDim.x * blockIdx.x + threadIdx.x; const int32_t x = blockDim.y * blockIdx.y + threadIdx.y; const int32_t inPlaneIdx = blockIdx.z / output.size(2); const int32_t paramIdx = blockIdx.z % (output.size(1) * output.size(2)); const int32_t h = output.size(3); const int32_t w = output.size(4); const auto inputIntPlane = inputInt[inPlaneIdx]; if (x < h and y < w) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = xMinInt[paramIdx]; const int xMaxCurr = xMaxInt[paramIdx]; const int yMinCurr = yMinInt[paramIdx]; const int yMaxCurr = yMaxInt[paramIdx]; const scalar_t xMinCurrFrac = xMinFrac[paramIdx]; const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx]; const scalar_t yMinCurrFrac = yMinFrac[paramIdx]; const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx]; const int32_t t = max(0, min(x+xMinCurr, h)); const int32_t b = max(0, min(x+xMaxCurr, h)); const int32_t l = max(0, min(y+yMinCurr, w)); const int32_t r = max(0, min(y+yMaxCurr, w)); const int32_t bAdv = max(0, min(x+xMaxCurr+1, h)); const int32_t rAdv = max(0, min(y+yMaxCurr+1, w)); const int32_t tAdv = max(0, min(x+xMinCurr-1, h)); const int32_t lAdv = max(0, min(y+yMinCurr-1, w)); scalar_t outValue; // -- main area outValue = inputIntPlane[b][r] - inputIntPlane[t][r] - inputIntPlane[b][l] + inputIntPlane[t][l]; // -- xMax border outValue += ( inputIntPlane[bAdv][r] - inputIntPlane[b ][r] - inputIntPlane[bAdv][l] + inputIntPlane[b ][l]) * xMaxCurrFrac; // -- yMax border outValue += ( inputIntPlane[b][rAdv] - inputIntPlane[b][r ] - inputIntPlane[t][rAdv] + inputIntPlane[t][r ]) * yMaxCurrFrac; // -- xMin border outValue += ( inputIntPlane[t ][r] - inputIntPlane[tAdv][r] - inputIntPlane[t ][l] + inputIntPlane[tAdv][l]) * xMinCurrFrac; // -- yMin border outValue += ( inputIntPlane[b][l ] - inputIntPlane[b][lAdv] - inputIntPlane[t][l ] + inputIntPlane[t][lAdv]) * yMinCurrFrac; // -- corner pixels // Note: before, I used plain `input` to access corner values // with lower memory access overhead. Moved to `input_integrated` // to get rid of an extra input to this function. 
if (not ((x+xMaxCurr >= h) | (y+yMaxCurr >= w) | (x+xMaxCurr < 0) | (y+yMaxCurr < 0))) { outValue += xMaxCurrFrac * yMaxCurrFrac * ( inputIntPlane[b+1][r+1] - inputIntPlane[b ][r+1] - inputIntPlane[b+1][r ] + inputIntPlane[b ][r ]); } if (not ((x+xMinCurr > h) | (y+yMaxCurr >= w) | (x+xMinCurr <= 0) | (y+yMaxCurr < 0))) { outValue += xMinCurrFrac * yMaxCurrFrac * ( inputIntPlane[t ][r+1] - inputIntPlane[t-1][r+1] - inputIntPlane[t ][r ] + inputIntPlane[t-1][r ]); } if (not ((x+xMaxCurr >= h) | (y+yMinCurr > w) | (x+xMaxCurr < 0) | (y+yMinCurr <= 0))) { outValue += xMaxCurrFrac * yMinCurrFrac * ( inputIntPlane[b+1][l ] - inputIntPlane[b ][l ] - inputIntPlane[b+1][l-1] + inputIntPlane[b ][l-1]); } if (not ((x+xMinCurr > h) | (y+yMinCurr > w) | (x+xMinCurr <= 0) | (y+yMinCurr <= 0))) { outValue += xMinCurrFrac * yMinCurrFrac * ( inputIntPlane[t ][l ] - inputIntPlane[t-1][l ] - inputIntPlane[t ][l-1] + inputIntPlane[t-1][l-1]); } // TODO error: expression must be a modifiable lvalue output.data()[(blockIdx.z * h + x) * w + y] = outValue * (normalize ? 
area[paramIdx] : static_cast<scalar_t>(1)); } } // TODO put split params and area into constant memory template <bool normalize, bool exact> void boxConvUpdateOutput( at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt , at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac, at::Tensor & area, at::Tensor & input_integrated, at::Tensor & output) { const int h = output.size(-2); const int w = output.size(-1); const int totalOutputChannels = output.numel() / (h * w); const dim3 blockSize(32, 32, 1); const dim3 gridSize( (w + blockSize.x - 1) / blockSize.x, (h + blockSize.y - 1) / blockSize.y, (totalOutputChannels + blockSize.z - 1) / blockSize.z); AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.type(), "gpu::boxConvUpdateOutput", ([&] { auto inputIntFlattened = input_integrated.view({-1, h+1, w+1}); auto inputIntAcsr = inputIntFlattened.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(); auto outputAcsr = output.packed_accessor<scalar_t, 5, at::RestrictPtrTraits, int32_t>(); if (exact) { boxConvUpdateOutputKernel <normalize> <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, outputAcsr, xMinInt.data<int32_t>(), xMaxInt.data<int32_t>(), yMinInt.data<int32_t>(), yMaxInt.data<int32_t>(), xMinFrac.data<scalar_t>(), xMaxFrac.data<scalar_t>(), yMinFrac.data<scalar_t>(), yMaxFrac.data<scalar_t>(), normalize ? area.data<scalar_t>() : nullptr); } else { boxConvUpdateOutputKernel <normalize> <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, outputAcsr, xMinInt.data<int32_t>(), xMaxInt.data<int32_t>(), yMinInt.data<int32_t>(), yMaxInt.data<int32_t>(), normalize ? 
area.data<scalar_t>() : nullptr); } THCudaCheck(cudaGetLastError()); })); } // explicitly instantiate template void boxConvUpdateOutput<true, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<false, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<true, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateOutput<false, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); }
0471cd0d272f00883f1c93764ea3b6e178a9676c.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include <hip/hip_runtime.h> #include <cstdio> #include <chrono> using namespace std; //Code used from examples and modified for activity //Adrian Biller A01018940 //matrix multiplication with 2D 2D //inicialization of matrices void initialData(int *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = i+1; } return; } //printing arrays void printArray(int * arr, int size) { int totalSize = size * size; int row = 1; for(int x = 0; x < totalSize; x++){ printf("%d ", arr[x]); if((size * row)-1 == x){ row++; printf("\n"); } } } //multiplication of matrices using cpu void multiplyMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { for(int i = 0; i < nx; i++){ for(int j = 0; j < nx ; j++){ for(int k = 0; k < nx; k++){ C[i*nx+j] += A[i*nx+k] * B[k*nx+j]; } } } return; } //checking result of gpu and comparing them with cpu matrix void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } //matrix calculation using cpu __global__ void multMatrixOnGPU2D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny){ for(int k = 0; k < nx; k++){ MatC[ix * nx + iy] += MatA[ix * nx + k] * MatB[k * nx + iy]; } } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(hipSetDevice(dev), "Error setting device"); // set up data size of matrix // 
int nx = 1 << 12; // int ny = 1 << 12; int nx = 4000; int ny = 4000; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // printArray(h_A, nx); // printf("\n"); // printArray(h_B, nx); // printf("\n"); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result SAFE_CALLs auto start_cpu = chrono::high_resolution_clock::now(); // multiplyMatrixOnHost(h_A, h_B, hostRef, nx, ny); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count()); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA"); SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB"); SAFE_CALL(hipMemset(d_MatC, 0, nBytes), "Error setting d_MatC to zeros"); // invoke kernel at host side int dimx = 32; int dimy = 32; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); start_cpu = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( multMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel"); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed 
%f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel error SAFE_CALL(hipGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC"); // printArray(hostRef, nx); // printf("Host\n"); // printArray(gpuRef, nx); // printf("GPU\n"); // // check device results // checkResult(hostRef, gpuRef, nxy); // free device global memory SAFE_CALL(hipFree(d_MatA), "Error freeing memory"); SAFE_CALL(hipFree(d_MatB), "Error freeing memory"); SAFE_CALL(hipFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device SAFE_CALL(hipDeviceReset(), "Error reseting"); return (0); }
0471cd0d272f00883f1c93764ea3b6e178a9676c.cu
#include "common.h" #include <cuda_runtime.h> #include <cstdio> #include <chrono> using namespace std; //Code used from examples and modified for activity //Adrian Biller A01018940 //matrix multiplication with 2D 2D //inicialization of matrices void initialData(int *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = i+1; } return; } //printing arrays void printArray(int * arr, int size) { int totalSize = size * size; int row = 1; for(int x = 0; x < totalSize; x++){ printf("%d ", arr[x]); if((size * row)-1 == x){ row++; printf("\n"); } } } //multiplication of matrices using cpu void multiplyMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { for(int i = 0; i < nx; i++){ for(int j = 0; j < nx ; j++){ for(int k = 0; k < nx; k++){ C[i*nx+j] += A[i*nx+k] * B[k*nx+j]; } } } return; } //checking result of gpu and comparing them with cpu matrix void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } //matrix calculation using cpu __global__ void multMatrixOnGPU2D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny){ for(int k = 0; k < nx; k++){ MatC[ix * nx + iy] += MatA[ix * nx + k] * MatB[k * nx + iy]; } } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(cudaSetDevice(dev), "Error setting device"); // set up data size of matrix // int nx = 1 << 12; // int ny = 1 << 12; int nx = 4000; int ny 
= 4000; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // printArray(h_A, nx); // printf("\n"); // printArray(h_B, nx); // printf("\n"); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result SAFE_CALLs auto start_cpu = chrono::high_resolution_clock::now(); // multiplyMatrixOnHost(h_A, h_B, hostRef, nx, ny); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count()); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA"); SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB"); SAFE_CALL(cudaMemset(d_MatC, 0, nBytes), "Error setting d_MatC to zeros"); // invoke kernel at host side int dimx = 32; int dimy = 32; dim3 block(dimx, dimy); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); start_cpu = chrono::high_resolution_clock::now(); multMatrixOnGPU2D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel"); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel 
error SAFE_CALL(cudaGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC"); // printArray(hostRef, nx); // printf("Host\n"); // printArray(gpuRef, nx); // printf("GPU\n"); // // check device results // checkResult(hostRef, gpuRef, nxy); // free device global memory SAFE_CALL(cudaFree(d_MatA), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatB), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device SAFE_CALL(cudaDeviceReset(), "Error reseting"); return (0); }
f2646ef9600d29bd6cfe2e937990f163c561019e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Demonstration of inline PTX (assembly language) usage in CUDA kernels */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> __global__ void sequence_gpu(int *d_ptr, int length) { int elemID = blockIdx.x * blockDim.x + threadIdx.x; if (elemID < length) { unsigned int laneid; //This command gets the lane ID within the current warp asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); d_ptr[elemID] = laneid; } } void sequence_cpu(int *h_ptr, int length) { for (int elemID=0; elemID<length; elemID++) { h_ptr[elemID] = elemID % 32; } } int main(int argc, char **argv) { printf("CUDA inline PTX assembler sample\n"); const int N = 1000; int dev = findCudaDevice(argc, (const char **) argv); if (dev == -1) { return EXIT_FAILURE; } int *d_ptr; checkCudaErrors(hipMalloc(&d_ptr, N * sizeof(int))); int *h_ptr; checkCudaErrors(hipHostMalloc(&h_ptr, N * sizeof(int))); dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize((N + cudaBlockSize.x - 1) / cudaBlockSize.x, 1, 1); hipLaunchKernelGGL(( sequence_gpu), dim3(cudaGridSize), dim3(cudaBlockSize), 0, 0, d_ptr, N); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); sequence_cpu(h_ptr, N); int *h_d_ptr; checkCudaErrors(hipHostMalloc(&h_d_ptr, N *sizeof(int))); checkCudaErrors(hipMemcpy(h_d_ptr, d_ptr, N *sizeof(int), hipMemcpyDeviceToHost)); bool bValid = true; for (int i=0; i<N && bValid; i++) { if (h_ptr[i] != h_d_ptr[i]) 
{ bValid = false; } } printf("Test %s.\n", bValid ? "Successful" : "Failed"); checkCudaErrors(hipFree(d_ptr)); checkCudaErrors(hipHostFree(h_ptr)); checkCudaErrors(hipHostFree(h_d_ptr)); checkCudaErrors(hipDeviceReset()); return bValid ? EXIT_SUCCESS: EXIT_FAILURE; }
f2646ef9600d29bd6cfe2e937990f163c561019e.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Demonstration of inline PTX (assembly language) usage in CUDA kernels */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> __global__ void sequence_gpu(int *d_ptr, int length) { int elemID = blockIdx.x * blockDim.x + threadIdx.x; if (elemID < length) { unsigned int laneid; //This command gets the lane ID within the current warp asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); d_ptr[elemID] = laneid; } } void sequence_cpu(int *h_ptr, int length) { for (int elemID=0; elemID<length; elemID++) { h_ptr[elemID] = elemID % 32; } } int main(int argc, char **argv) { printf("CUDA inline PTX assembler sample\n"); const int N = 1000; int dev = findCudaDevice(argc, (const char **) argv); if (dev == -1) { return EXIT_FAILURE; } int *d_ptr; checkCudaErrors(cudaMalloc(&d_ptr, N * sizeof(int))); int *h_ptr; checkCudaErrors(cudaMallocHost(&h_ptr, N * sizeof(int))); dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize((N + cudaBlockSize.x - 1) / cudaBlockSize.x, 1, 1); sequence_gpu<<<cudaGridSize, cudaBlockSize>>>(d_ptr, N); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); sequence_cpu(h_ptr, N); int *h_d_ptr; checkCudaErrors(cudaMallocHost(&h_d_ptr, N *sizeof(int))); checkCudaErrors(cudaMemcpy(h_d_ptr, d_ptr, N *sizeof(int), cudaMemcpyDeviceToHost)); bool bValid = true; for (int i=0; i<N && bValid; i++) { if (h_ptr[i] != h_d_ptr[i]) { bValid = false; } } printf("Test %s.\n", bValid ? 
"Successful" : "Failed"); checkCudaErrors(cudaFree(d_ptr)); checkCudaErrors(cudaFreeHost(h_ptr)); checkCudaErrors(cudaFreeHost(h_d_ptr)); checkCudaErrors(cudaDeviceReset()); return bValid ? EXIT_SUCCESS: EXIT_FAILURE; }
eac150910f61316768669acd5aabb038e0373600.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Force_Atlas2 tests // Author: Hugo Linsenmaier hlinsenmaier@nvidia.com #include <utilities/high_res_clock.h> #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <layout/trust_worthiness.h> #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <raft/error.hpp> #include <rmm/exec_policy.hpp> #include <hip/hip_runtime_api.h> #include <fstream> #include <iostream> // iterations for perf tests static int PERF_MULTIPLIER = 5; typedef struct Force_Atlas2_Usecase_t { std::string matrix_file; float score; Force_Atlas2_Usecase_t(const std::string& a, const float b) { // assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir(); if ((a != "") && (a[0] != '/')) { matrix_file = rapidsDatasetRootDir + "/" + a; } else { matrix_file = a; } score = b; } Force_Atlas2_Usecase_t& operator=(const Force_Atlas2_Usecase_t& rhs) { matrix_file = rhs.matrix_file; score = rhs.score; return *this; } } Force_Atlas2_Usecase; class Tests_Force_Atlas2 : public ::testing::TestWithParam<Force_Atlas2_Usecase> { public: Tests_Force_Atlas2() {} static void SetupTestCase() {} static void TearDownTestCase() { if (cugraph::test::g_perf) { for (unsigned int i = 0; i < force_atlas2_time.size(); ++i) { std::cout << force_atlas2_time[i] / PERF_MULTIPLIER << std::endl; } } } virtual void SetUp() {} virtual void TearDown() {} static std::vector<double> force_atlas2_time; 
void compute_rank() {} void trustworthiness(float* X, float* Y) { return; } template <typename T> void run_current_test(const Force_Atlas2_Usecase& param) { const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + cugraph::test::getFileName(param.matrix_file) + std::string("_") + ss.str().c_str(); int m, k, nnz; MM_typecode mc; HighResClock hr_clock; double time_tmp; FILE* fpin = fopen(param.matrix_file.c_str(), "r"); ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure."; ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0) << "could not read Matrix Market file properties" << "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_FALSE(mm_is_complex(mc)); ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host std::vector<int> cooRowInd(nnz), cooColInd(nnz); std::vector<T> cooVal(nnz); std::vector<std::vector<int>> adj_matrix(m, std::vector<int>(m)); std::vector<float> force_atlas2(m * 2); raft::handle_t const handle; auto stream = handle.get_stream(); // device alloc rmm::device_uvector<float> pos(m * 2, stream); // Read ASSERT_EQ((cugraph::test::mm_to_coo<int, T>( fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)), 0) << "could not read matrix data" << "\n"; ASSERT_EQ(fclose(fpin), 0); // Build Adjacency Matrix for (int i = 0; i < nnz; ++i) { auto row = cooRowInd[i]; auto col = cooColInd[i]; adj_matrix[row][col] = 1; } // Allocate COO on device rmm::device_uvector<int> srcs_v(nnz, stream); rmm::device_uvector<int> dests_v(nnz, stream); rmm::device_uvector<T> weights_v(nnz, stream); int* srcs = srcs_v.data(); int* dests = dests_v.data(); T* weights = weights_v.data(); // FIXME: RAFT error handling mechanism should be used instead CUDA_TRY(hipMemcpy(srcs, &cooRowInd[0], sizeof(int) * 
nnz, hipMemcpyDefault)); CUDA_TRY(hipMemcpy(dests, &cooColInd[0], sizeof(int) * nnz, hipMemcpyDefault)); CUDA_TRY(hipMemcpy(weights, &cooVal[0], sizeof(T) * nnz, hipMemcpyDefault)); cugraph::legacy::GraphCOOView<int, int, T> G(srcs, dests, weights, m, nnz); const int max_iter = 500; float* x_start = nullptr; float* y_start = nullptr; bool outbound_attraction_distribution = false; bool lin_log_mode = false; bool prevent_overlapping = false; const float edge_weight_influence = 1.0; const float jitter_tolerance = 1.0; bool optimize = true; const float theta = 1.0; const float scaling_ratio = 2.0; bool strong_gravity_mode = false; const float gravity = 1.0; bool verbose = false; if (cugraph::test::g_perf) { hr_clock.start(); for (int i = 0; i < PERF_MULTIPLIER; ++i) { cugraph::force_atlas2<int, int, T>(handle, G, pos.data(), max_iter, x_start, y_start, outbound_attraction_distribution, lin_log_mode, prevent_overlapping, edge_weight_influence, jitter_tolerance, optimize, theta, scaling_ratio, strong_gravity_mode, gravity, verbose); hipDeviceSynchronize(); } hr_clock.stop(&time_tmp); force_atlas2_time.push_back(time_tmp); } else { hipProfilerStart(); cugraph::force_atlas2<int, int, T>(handle, G, pos.data(), max_iter, x_start, y_start, outbound_attraction_distribution, lin_log_mode, prevent_overlapping, edge_weight_influence, jitter_tolerance, optimize, theta, scaling_ratio, strong_gravity_mode, gravity, verbose); hipProfilerStop(); hipDeviceSynchronize(); } // Copy pos to host std::vector<float> h_pos(m * 2); CUDA_TRY(hipMemcpy(&h_pos[0], pos.data(), sizeof(float) * m * 2, hipMemcpyDeviceToHost)); // Transpose the data std::vector<std::vector<double>> C_contiguous_embedding(m, std::vector<double>(2)); for (int i = 0; i < m; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[i][j] = h_pos[j * m + i]; } // Test trustworthiness double score_bh = trustworthiness_score(adj_matrix, C_contiguous_embedding, m, 2, 5); printf("score: %f\n", score_bh); ASSERT_GT(score_bh, 
param.score); } }; std::vector<double> Tests_Force_Atlas2::force_atlas2_time; TEST_P(Tests_Force_Atlas2, CheckFP32_T) { run_current_test<float>(GetParam()); } TEST_P(Tests_Force_Atlas2, CheckFP64_T) { run_current_test<double>(GetParam()); } // --gtest_filter=*simple_test* INSTANTIATE_TEST_SUITE_P(simple_test, Tests_Force_Atlas2, ::testing::Values(Force_Atlas2_Usecase("test/datasets/karate.mtx", 0.73), Force_Atlas2_Usecase("test/datasets/dolphins.mtx", 0.69), Force_Atlas2_Usecase("test/datasets/polbooks.mtx", 0.76), Force_Atlas2_Usecase("test/datasets/netscience.mtx", 0.80))); CUGRAPH_TEST_PROGRAM_MAIN()
eac150910f61316768669acd5aabb038e0373600.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Force_Atlas2 tests // Author: Hugo Linsenmaier hlinsenmaier@nvidia.com #include <utilities/high_res_clock.h> #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <layout/trust_worthiness.h> #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <raft/error.hpp> #include <rmm/exec_policy.hpp> #include <cuda_profiler_api.h> #include <fstream> #include <iostream> // iterations for perf tests static int PERF_MULTIPLIER = 5; typedef struct Force_Atlas2_Usecase_t { std::string matrix_file; float score; Force_Atlas2_Usecase_t(const std::string& a, const float b) { // assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir(); if ((a != "") && (a[0] != '/')) { matrix_file = rapidsDatasetRootDir + "/" + a; } else { matrix_file = a; } score = b; } Force_Atlas2_Usecase_t& operator=(const Force_Atlas2_Usecase_t& rhs) { matrix_file = rhs.matrix_file; score = rhs.score; return *this; } } Force_Atlas2_Usecase; class Tests_Force_Atlas2 : public ::testing::TestWithParam<Force_Atlas2_Usecase> { public: Tests_Force_Atlas2() {} static void SetupTestCase() {} static void TearDownTestCase() { if (cugraph::test::g_perf) { for (unsigned int i = 0; i < force_atlas2_time.size(); ++i) { std::cout << force_atlas2_time[i] / PERF_MULTIPLIER << std::endl; } } } virtual void SetUp() {} virtual void TearDown() {} static std::vector<double> force_atlas2_time; void compute_rank() {} void trustworthiness(float* X, float* Y) 
{ return; } template <typename T> void run_current_test(const Force_Atlas2_Usecase& param) { const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream ss; std::string test_id = std::string(test_info->test_case_name()) + std::string(".") + std::string(test_info->name()) + std::string("_") + cugraph::test::getFileName(param.matrix_file) + std::string("_") + ss.str().c_str(); int m, k, nnz; MM_typecode mc; HighResClock hr_clock; double time_tmp; FILE* fpin = fopen(param.matrix_file.c_str(), "r"); ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure."; ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0) << "could not read Matrix Market file properties" << "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_FALSE(mm_is_complex(mc)); ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host std::vector<int> cooRowInd(nnz), cooColInd(nnz); std::vector<T> cooVal(nnz); std::vector<std::vector<int>> adj_matrix(m, std::vector<int>(m)); std::vector<float> force_atlas2(m * 2); raft::handle_t const handle; auto stream = handle.get_stream(); // device alloc rmm::device_uvector<float> pos(m * 2, stream); // Read ASSERT_EQ((cugraph::test::mm_to_coo<int, T>( fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)), 0) << "could not read matrix data" << "\n"; ASSERT_EQ(fclose(fpin), 0); // Build Adjacency Matrix for (int i = 0; i < nnz; ++i) { auto row = cooRowInd[i]; auto col = cooColInd[i]; adj_matrix[row][col] = 1; } // Allocate COO on device rmm::device_uvector<int> srcs_v(nnz, stream); rmm::device_uvector<int> dests_v(nnz, stream); rmm::device_uvector<T> weights_v(nnz, stream); int* srcs = srcs_v.data(); int* dests = dests_v.data(); T* weights = weights_v.data(); // FIXME: RAFT error handling mechanism should be used instead CUDA_TRY(cudaMemcpy(srcs, &cooRowInd[0], sizeof(int) * nnz, cudaMemcpyDefault)); CUDA_TRY(cudaMemcpy(dests, 
&cooColInd[0], sizeof(int) * nnz, cudaMemcpyDefault)); CUDA_TRY(cudaMemcpy(weights, &cooVal[0], sizeof(T) * nnz, cudaMemcpyDefault)); cugraph::legacy::GraphCOOView<int, int, T> G(srcs, dests, weights, m, nnz); const int max_iter = 500; float* x_start = nullptr; float* y_start = nullptr; bool outbound_attraction_distribution = false; bool lin_log_mode = false; bool prevent_overlapping = false; const float edge_weight_influence = 1.0; const float jitter_tolerance = 1.0; bool optimize = true; const float theta = 1.0; const float scaling_ratio = 2.0; bool strong_gravity_mode = false; const float gravity = 1.0; bool verbose = false; if (cugraph::test::g_perf) { hr_clock.start(); for (int i = 0; i < PERF_MULTIPLIER; ++i) { cugraph::force_atlas2<int, int, T>(handle, G, pos.data(), max_iter, x_start, y_start, outbound_attraction_distribution, lin_log_mode, prevent_overlapping, edge_weight_influence, jitter_tolerance, optimize, theta, scaling_ratio, strong_gravity_mode, gravity, verbose); cudaDeviceSynchronize(); } hr_clock.stop(&time_tmp); force_atlas2_time.push_back(time_tmp); } else { cudaProfilerStart(); cugraph::force_atlas2<int, int, T>(handle, G, pos.data(), max_iter, x_start, y_start, outbound_attraction_distribution, lin_log_mode, prevent_overlapping, edge_weight_influence, jitter_tolerance, optimize, theta, scaling_ratio, strong_gravity_mode, gravity, verbose); cudaProfilerStop(); cudaDeviceSynchronize(); } // Copy pos to host std::vector<float> h_pos(m * 2); CUDA_TRY(cudaMemcpy(&h_pos[0], pos.data(), sizeof(float) * m * 2, cudaMemcpyDeviceToHost)); // Transpose the data std::vector<std::vector<double>> C_contiguous_embedding(m, std::vector<double>(2)); for (int i = 0; i < m; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[i][j] = h_pos[j * m + i]; } // Test trustworthiness double score_bh = trustworthiness_score(adj_matrix, C_contiguous_embedding, m, 2, 5); printf("score: %f\n", score_bh); ASSERT_GT(score_bh, param.score); } }; std::vector<double> 
Tests_Force_Atlas2::force_atlas2_time; TEST_P(Tests_Force_Atlas2, CheckFP32_T) { run_current_test<float>(GetParam()); } TEST_P(Tests_Force_Atlas2, CheckFP64_T) { run_current_test<double>(GetParam()); } // --gtest_filter=*simple_test* INSTANTIATE_TEST_SUITE_P(simple_test, Tests_Force_Atlas2, ::testing::Values(Force_Atlas2_Usecase("test/datasets/karate.mtx", 0.73), Force_Atlas2_Usecase("test/datasets/dolphins.mtx", 0.69), Force_Atlas2_Usecase("test/datasets/polbooks.mtx", 0.76), Force_Atlas2_Usecase("test/datasets/netscience.mtx", 0.80))); CUGRAPH_TEST_PROGRAM_MAIN()
83e6cef769816272165f0e8f475d8037b03e3466.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define N 1024 // number of rows = number of columns #include <time.h> #include <stdio.h> __global__ void matrix_mult_kernel (int *a, int *b, int *c, int n); void init(int * input,int length); void print_matrix(int * matrix, int size); int main() { clock_t start, end; double cpu_time_used; int * h_a,*d_a; int * h_b,*d_b; int * h_c,*d_c; int data_length = N * N * sizeof(int); h_a=(int*)malloc(data_length); h_b=(int*)malloc(data_length); h_c=(int*)malloc(data_length); init(h_a,N*N); init(h_b,N*N); // Initialize matrices on the gpu hipMalloc(&d_a, data_length); hipMalloc(&d_b, data_length); hipMalloc(&d_c, data_length); hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { printf("cudaCheckError() failed line 29 : %s\n", hipGetErrorString( err ) ); exit( -1 ); } start = clock(); // Copy matrices to the gpu hipMemcpy(d_a, h_a, data_length, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, data_length, hipMemcpyHostToDevice); err = hipGetLastError(); if ( hipSuccess != err ) { printf("cudaCheckError() failed line 42: %s\n", hipGetErrorString( err ) ); exit( -1 ); } int blocksPerMatrixRow = 2; int threadsPerBlocks = N / blocksPerMatrixRow; // as we choose 2 blocks per one matrix row hipLaunchKernelGGL(( matrix_mult_kernel), dim3(dim3(blocksPerMatrixRow, N)), dim3(dim3(threadsPerBlocks)), 0, 0, d_a, d_b, d_c, N); err = hipGetLastError(); if ( hipSuccess != err ) { printf("cudaCheckError() failed line 51 %s\n", hipGetErrorString( err ) ); exit( -1 ); } // Copy output matrix h_c to the host memory hipMemcpy(h_c, d_c, data_length, hipMemcpyDeviceToHost); err = hipGetLastError(); if ( hipSuccess != err ) { printf("cudaCheckError() failed line 61: %s\n", hipGetErrorString( err ) ); exit( -1 ); } end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %f\n", cpu_time_used); // Free all alocated memory err = hipGetLastError(); if ( hipSuccess != err 
) { printf("cudaCheckError() failed line 70: %s\n", hipGetErrorString( err ) ); exit( -1 ); } free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void init(int * input, int size) { int i; for(i=0;i<size;i++) { input[i]=rand()%5; } } void print_matrix(int * matrix,int size) { printf("Matrix items: \n"); int i,j; for(i=0;i<size;i++) { for(j=0;j<size;j++) printf("%d,",matrix[i*size+j]); printf("\n"); } } __global__ void matrix_mult_kernel (int *a, int *b, int *c, int n) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int i, j, k; // Each thread calculate one cell of c for (i = y; i < y + 1; i++) { for (j = x; j < x + 1; j++) { for (k = 0; k < n; k++) { c[i*n+j] += a[i*n+k] * b[k*n+j]; } } } }
83e6cef769816272165f0e8f475d8037b03e3466.cu
#define N 1024 // number of rows = number of columns #include <time.h> #include <stdio.h> __global__ void matrix_mult_kernel (int *a, int *b, int *c, int n); void init(int * input,int length); void print_matrix(int * matrix, int size); int main() { clock_t start, end; double cpu_time_used; int * h_a,*d_a; int * h_b,*d_b; int * h_c,*d_c; int data_length = N * N * sizeof(int); h_a=(int*)malloc(data_length); h_b=(int*)malloc(data_length); h_c=(int*)malloc(data_length); init(h_a,N*N); init(h_b,N*N); // Initialize matrices on the gpu cudaMalloc(&d_a, data_length); cudaMalloc(&d_b, data_length); cudaMalloc(&d_c, data_length); cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { printf("cudaCheckError() failed line 29 : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } start = clock(); // Copy matrices to the gpu cudaMemcpy(d_a, h_a, data_length, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, data_length, cudaMemcpyHostToDevice); err = cudaGetLastError(); if ( cudaSuccess != err ) { printf("cudaCheckError() failed line 42: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } int blocksPerMatrixRow = 2; int threadsPerBlocks = N / blocksPerMatrixRow; // as we choose 2 blocks per one matrix row matrix_mult_kernel<<< dim3(blocksPerMatrixRow, N), dim3(threadsPerBlocks)>>>(d_a, d_b, d_c, N); err = cudaGetLastError(); if ( cudaSuccess != err ) { printf("cudaCheckError() failed line 51 %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // Copy output matrix h_c to the host memory cudaMemcpy(h_c, d_c, data_length, cudaMemcpyDeviceToHost); err = cudaGetLastError(); if ( cudaSuccess != err ) { printf("cudaCheckError() failed line 61: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %f\n", cpu_time_used); // Free all alocated memory err = cudaGetLastError(); if ( cudaSuccess != err ) { printf("cudaCheckError() failed line 70: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } 
free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void init(int * input, int size) { int i; for(i=0;i<size;i++) { input[i]=rand()%5; } } void print_matrix(int * matrix,int size) { printf("Matrix items: \n"); int i,j; for(i=0;i<size;i++) { for(j=0;j<size;j++) printf("%d,",matrix[i*size+j]); printf("\n"); } } __global__ void matrix_mult_kernel (int *a, int *b, int *c, int n) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int i, j, k; // Each thread calculate one cell of c for (i = y; i < y + 1; i++) { for (j = x; j < x + 1; j++) { for (k = 0; k < n; k++) { c[i*n+j] += a[i*n+k] * b[k*n+j]; } } } }
41b2a8aa6bf461cff05dd4074c301e25707287e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { template <typename T> inline __device__ T SigmoidGPU(const T& x) { return 1.0f / (1.0f + __expf(-x)); } template <typename T> __global__ void YoloBoxHeadCudaKernel(const T* input, T* output, const int grid_size_x, const int grid_size_y, const int class_num, const int anchors_num) { int x_id = blockIdx.x * blockDim.x + threadIdx.x; int y_id = blockIdx.y * blockDim.y + threadIdx.y; int z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= grid_size_x) || (y_id >= grid_size_y) || (z_id >= anchors_num)) { return; } const int grids_num = grid_size_x * grid_size_y; const int bbindex = y_id * grid_size_x + x_id; // objectness output[bbindex + grids_num * (z_id * (5 + class_num) + 4)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 4)]); // x output[bbindex + grids_num * (z_id * (5 + class_num) + 0)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 0)]); // y output[bbindex + grids_num * (z_id * (5 + class_num) + 1)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 1)]); // w output[bbindex + grids_num * (z_id * (5 + class_num) + 2)] = 
__expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 2)]); // h output[bbindex + grids_num * (z_id * (5 + class_num) + 3)] = __expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 3)]); // Probabilities of classes for (int i = 0; i < class_num; ++i) { output[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] = SigmoidGPU( input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))]); } } template <typename T> class YoloBoxHeadKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { using Tensor = phi::DenseTensor; auto* x = context.Input<phi::DenseTensor>("X"); auto* out = context.Output<phi::DenseTensor>("Out"); auto anchors = context.Attr<std::vector<int>>("anchors"); auto class_num = context.Attr<int>("class_num"); auto& device_ctx = context.template device_context<phi::GPUContext>(); auto x_dims = x->dims(); const int batch_size = x_dims[0]; const int h = x_dims[2]; const int w = x_dims[3]; const int grid_size_x = w; const int grid_size_y = h; const int anchors_num = anchors.size() / 2; const T* input_data = x->data<T>(); T* output_data = device_ctx.Alloc<T>(out, out->numel() * sizeof(T)); auto stream = device_ctx.stream(); const int volume = x_dims[1] * h * w; dim3 block(16, 16, 4); dim3 grid((grid_size_x / block.x) + 1, (grid_size_y / block.y) + 1, (anchors_num / block.z) + 1); for (int n = 0; n < batch_size; n++) { hipLaunchKernelGGL(( YoloBoxHeadCudaKernel), dim3(grid), dim3(block), 0, stream, input_data + n * volume, output_data + n * volume, grid_size_x, grid_size_y, class_num, anchors_num); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(yolo_box_head, ops::YoloBoxHeadKernel<float>);
41b2a8aa6bf461cff05dd4074c301e25707287e7.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { template <typename T> inline __device__ T SigmoidGPU(const T& x) { return 1.0f / (1.0f + __expf(-x)); } template <typename T> __global__ void YoloBoxHeadCudaKernel(const T* input, T* output, const int grid_size_x, const int grid_size_y, const int class_num, const int anchors_num) { int x_id = blockIdx.x * blockDim.x + threadIdx.x; int y_id = blockIdx.y * blockDim.y + threadIdx.y; int z_id = blockIdx.z * blockDim.z + threadIdx.z; if ((x_id >= grid_size_x) || (y_id >= grid_size_y) || (z_id >= anchors_num)) { return; } const int grids_num = grid_size_x * grid_size_y; const int bbindex = y_id * grid_size_x + x_id; // objectness output[bbindex + grids_num * (z_id * (5 + class_num) + 4)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 4)]); // x output[bbindex + grids_num * (z_id * (5 + class_num) + 0)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 0)]); // y output[bbindex + grids_num * (z_id * (5 + class_num) + 1)] = SigmoidGPU(input[bbindex + grids_num * (z_id * (5 + class_num) + 1)]); // w output[bbindex + grids_num * (z_id * (5 + class_num) + 2)] = __expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 2)]); // h output[bbindex + 
grids_num * (z_id * (5 + class_num) + 3)] = __expf(input[bbindex + grids_num * (z_id * (5 + class_num) + 3)]); // Probabilities of classes for (int i = 0; i < class_num; ++i) { output[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))] = SigmoidGPU( input[bbindex + grids_num * (z_id * (5 + class_num) + (5 + i))]); } } template <typename T> class YoloBoxHeadKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { using Tensor = phi::DenseTensor; auto* x = context.Input<phi::DenseTensor>("X"); auto* out = context.Output<phi::DenseTensor>("Out"); auto anchors = context.Attr<std::vector<int>>("anchors"); auto class_num = context.Attr<int>("class_num"); auto& device_ctx = context.template device_context<phi::GPUContext>(); auto x_dims = x->dims(); const int batch_size = x_dims[0]; const int h = x_dims[2]; const int w = x_dims[3]; const int grid_size_x = w; const int grid_size_y = h; const int anchors_num = anchors.size() / 2; const T* input_data = x->data<T>(); T* output_data = device_ctx.Alloc<T>(out, out->numel() * sizeof(T)); auto stream = device_ctx.stream(); const int volume = x_dims[1] * h * w; dim3 block(16, 16, 4); dim3 grid((grid_size_x / block.x) + 1, (grid_size_y / block.y) + 1, (anchors_num / block.z) + 1); for (int n = 0; n < batch_size; n++) { YoloBoxHeadCudaKernel<<<grid, block, 0, stream>>>( input_data + n * volume, output_data + n * volume, grid_size_x, grid_size_y, class_num, anchors_num); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(yolo_box_head, ops::YoloBoxHeadKernel<float>);
755db6fdaf72fcddb007f4405be52ab4ec9fde07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zaxpycp.cu, normal z -> d, Tue Aug 30 09:38:27 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void daxpycp_kernel( int m, double *r, double *x, const double *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_D_ADD( x[i], r[i] ); r[i] = b[i]; } } /***************************************************************************//** adds x += r --and-- copies r = b *******************************************************************************/ extern "C" void magmablas_daxpycp_q( magma_int_t m, magmaDouble_ptr r, magmaDouble_ptr x, magmaDouble_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( daxpycp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, r, x, b ); }
755db6fdaf72fcddb007f4405be52ab4ec9fde07.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zaxpycp.cu, normal z -> d, Tue Aug 30 09:38:27 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void daxpycp_kernel( int m, double *r, double *x, const double *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_D_ADD( x[i], r[i] ); r[i] = b[i]; } } /***************************************************************************//** adds x += r --and-- copies r = b *******************************************************************************/ extern "C" void magmablas_daxpycp_q( magma_int_t m, magmaDouble_ptr r, magmaDouble_ptr x, magmaDouble_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); daxpycp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, r, x, b ); }
a056fb33f15a8907405e038c50e8ee5ac5b5a29f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "Examples.cuh" #include <device_launch_parameters.h> __global__ void hello_cuda() { printf("Hello Cuda\n"); } __global__ void print_thread_id() { printf("Hello Cuda tid[%d, %d, %d]\n", threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); //threadIdx.x, threadIdx.y, threadIdx.z); } __global__ void print_thread_variables() { printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, BlockDim{%d,%d,%d}, GridDim{%d,%d,%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z ); } __global__ void print_unique_thread_id_1D() { int tid = blockIdx.x * blockDim.x + threadIdx.x; printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, tid{%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, tid); } __global__ void print_unique_thread_id_3D(int * data) { int thread_count_in_block = blockDim.x * blockDim.y * blockDim.z; // Inside a block, threads access consecutive elements of array // (for z=0) 0 1 (for z=1) 4 5 // 2 3 6 7 int tid_in_block = blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x; int bid_in_grid = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int gid = bid_in_grid * thread_count_in_block + tid_in_block; printf("gid{%d}, data[%d] = %d\n", gid, gid, data[gid]); }
a056fb33f15a8907405e038c50e8ee5ac5b5a29f.cu
#include <stdio.h> #include "Examples.cuh" #include <device_launch_parameters.h> __global__ void hello_cuda() { printf("Hello Cuda\n"); } __global__ void print_thread_id() { printf("Hello Cuda tid[%d, %d, %d]\n", threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); //threadIdx.x, threadIdx.y, threadIdx.z); } __global__ void print_thread_variables() { printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, BlockDim{%d,%d,%d}, GridDim{%d,%d,%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z ); } __global__ void print_unique_thread_id_1D() { int tid = blockIdx.x * blockDim.x + threadIdx.x; printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, tid{%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, tid); } __global__ void print_unique_thread_id_3D(int * data) { int thread_count_in_block = blockDim.x * blockDim.y * blockDim.z; // Inside a block, threads access consecutive elements of array // (for z=0) 0 1 (for z=1) 4 5 // 2 3 6 7 int tid_in_block = blockDim.x * blockDim.y * threadIdx.z + blockDim.x * threadIdx.y + threadIdx.x; int bid_in_grid = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int gid = bid_in_grid * thread_count_in_block + tid_in_block; printf("gid{%d}, data[%d] = %d\n", gid, gid, data[gid]); }
02347d66f6d794f85152bedf5eb7033547f23667.hip
// !!! This is a file automatically generated by hipify!!! #ifdef _WIN32 #define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <sys/time.h> // includes, kernels #include "scan_largearray_kernel.cu" #define DEFAULT_NUM_ELEMENTS 16777216 //#define DEFAULT_NUM_ELEMENTS 16000000 //#define DEFAULT_NUM_ELEMENTS 1400 #define MAX_RAND 3 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const unsigned int* reference, const unsigned int* data, const unsigned int len); extern "C" void computeGold( unsigned int* reference, unsigned int* idata, const unsigned int len); bool CompareArrays(unsigned int *A, unsigned int *B, int size); void WriteFile(unsigned int* arr, char* file_name, int num_elements); int ReadParamsFile(int* params, char* file_name, int num_params); int ReadFile(unsigned int* arr, char* file_name, int num_elements); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { int num_read = 0; int* size = (int*)malloc(1 * sizeof(int)); unsigned int data2read = 1; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( unsigned int) * num_elements; unsigned int* h_data = (unsigned int*) malloc(mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. 
// * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicate the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. switch(argc-1) { case 2: // Determine size of array data2read = ReadParamsFile(size, argv[1], data2read); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile(h_data, argv[2], num_elements); break; case 3: // Three Arguments data2read = ReadParamsFile(size, argv[1], data2read); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); num_read = ReadFile(h_data, argv[2], num_elements); if(num_read != num_elements) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } // compute reference 
solution unsigned int* reference = (unsigned int*) malloc( mem_size); struct timeval start_time, end_time; gettimeofday(&start_time,NULL); computeGold( reference, h_data, num_elements); gettimeofday(&end_time,NULL); printf("Processing %u elements...\n", num_elements); double start_count = (double) start_time.tv_sec + 1.e-6 * (double) start_time.tv_usec; double end_count = (double) end_time.tv_sec + 1.e-6 * (double) end_time.tv_usec; double host_ms = (double)( (end_count - start_count) * 1000); printf("CPU Processing time: %lf (ms)\n", host_ms); // allocate device memory input and output arrays unsigned int* d_idata = NULL; unsigned int* d_odata = NULL; int padded_num_elements = TILE_SIZE*((num_elements+TILE_SIZE-1)/TILE_SIZE); int padded_mem_size = padded_num_elements *sizeof(unsigned int); // Make a padded copy of the input data unsigned int* padded_hdata = (unsigned int*) malloc(padded_mem_size); memcpy(padded_hdata, h_data, mem_size); memset(padded_hdata+num_elements, 0, padded_mem_size - mem_size); hipMalloc( (void**) &d_idata, padded_mem_size); hipMalloc( (void**) &d_odata, padded_mem_size); // copy host memory to device input array hipMemcpy( d_idata, padded_hdata, padded_mem_size, hipMemcpyHostToDevice); // initialize all the other device arrays to be safe hipMemcpy( d_odata, padded_hdata, padded_mem_size, hipMemcpyHostToDevice); free(padded_hdata); padded_hdata = NULL; // **===--------------- Allocate data structure here --------------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_odata, d_idata, TILE_SIZE); // Run the prescan hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // **===-------------- Modify the body of this function -----------===** prescanArray(d_odata, d_idata, padded_num_elements); // 
**===-----------------------------------------------------------===** hipEventRecord(stop); hipEventSynchronize(stop); float device_ms = 0; hipEventElapsedTime(&device_ms, start, stop); printf("GPU Processing time: %f (ms)\n", device_ms); printf("Speedup: %fX\n", host_ms/device_ms); // **===--------------- Deallocate data structure here ------------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host hipMemcpy( h_data, d_odata, sizeof(unsigned int) * num_elements, hipMemcpyDeviceToHost); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile(h_data, argv[1], num_elements); } // Check if the result is equivalent to the expected soluion unsigned int result_regtest = CompareArrays( reference, h_data, num_elements); //for(unsigned int i = 0; i < num_elements; i++){ //printf("reference[%u] = %d, h_data[%u] = %d\n", i, reference[i], i, h_data[i]); //} printf( "Test %s\n", (1 == result_regtest) ? 
"PASSED" : "FAILED"); // cleanup memory free(h_data); free(reference); hipFree( d_odata); hipFree( d_idata); } // Read array in from file int ReadFile(unsigned int* arr, char* file_name, int num_elements) { FILE* input = fopen(file_name, "r"); if (input == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_elements; i++) fscanf(input, "%u", &(arr[i])); return num_elements; } // Read params of input matrices int ReadParamsFile(int* params, char* file_name, int num_params) { FILE* input = fopen(file_name, "r"); if (input == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_params; i++) fscanf(input, "%u", &(params[i])); return num_params; } // Write a 16x16 matrix to file void WriteFile(unsigned int* arr, char* file_name, int num_elements) { FILE* output = fopen(file_name, "w"); if (output == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_elements; i++) { fprintf(output, "%f ", arr[i]); } } // returns true iff A and B have same elements in same order bool CompareArrays(unsigned int *A, unsigned int *B, int size) { for (unsigned i = 0; i < size; i++) if (fabs(A[i] - B[i]) > 0) return false; return true; }
02347d66f6d794f85152bedf5eb7033547f23667.cu
#ifdef _WIN32 #define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <sys/time.h> // includes, kernels #include "scan_largearray_kernel.cu" #define DEFAULT_NUM_ELEMENTS 16777216 //#define DEFAULT_NUM_ELEMENTS 16000000 //#define DEFAULT_NUM_ELEMENTS 1400 #define MAX_RAND 3 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const unsigned int* reference, const unsigned int* data, const unsigned int len); extern "C" void computeGold( unsigned int* reference, unsigned int* idata, const unsigned int len); bool CompareArrays(unsigned int *A, unsigned int *B, int size); void WriteFile(unsigned int* arr, char* file_name, int num_elements); int ReadParamsFile(int* params, char* file_name, int num_params); int ReadFile(unsigned int* arr, char* file_name, int num_elements); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { int num_read = 0; int* size = (int*)malloc(1 * sizeof(int)); unsigned int data2read = 1; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( unsigned int) * num_elements; unsigned int* h_data = (unsigned int*) malloc(mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. 
// * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicate the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. switch(argc-1) { case 2: // Determine size of array data2read = ReadParamsFile(size, argv[1], data2read); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile(h_data, argv[2], num_elements); break; case 3: // Three Arguments data2read = ReadParamsFile(size, argv[1], data2read); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); num_read = ReadFile(h_data, argv[2], num_elements); if(num_read != num_elements) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( unsigned int) * num_elements; h_data = (unsigned int*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } // compute reference 
solution unsigned int* reference = (unsigned int*) malloc( mem_size); struct timeval start_time, end_time; gettimeofday(&start_time,NULL); computeGold( reference, h_data, num_elements); gettimeofday(&end_time,NULL); printf("Processing %u elements...\n", num_elements); double start_count = (double) start_time.tv_sec + 1.e-6 * (double) start_time.tv_usec; double end_count = (double) end_time.tv_sec + 1.e-6 * (double) end_time.tv_usec; double host_ms = (double)( (end_count - start_count) * 1000); printf("CPU Processing time: %lf (ms)\n", host_ms); // allocate device memory input and output arrays unsigned int* d_idata = NULL; unsigned int* d_odata = NULL; int padded_num_elements = TILE_SIZE*((num_elements+TILE_SIZE-1)/TILE_SIZE); int padded_mem_size = padded_num_elements *sizeof(unsigned int); // Make a padded copy of the input data unsigned int* padded_hdata = (unsigned int*) malloc(padded_mem_size); memcpy(padded_hdata, h_data, mem_size); memset(padded_hdata+num_elements, 0, padded_mem_size - mem_size); cudaMalloc( (void**) &d_idata, padded_mem_size); cudaMalloc( (void**) &d_odata, padded_mem_size); // copy host memory to device input array cudaMemcpy( d_idata, padded_hdata, padded_mem_size, cudaMemcpyHostToDevice); // initialize all the other device arrays to be safe cudaMemcpy( d_odata, padded_hdata, padded_mem_size, cudaMemcpyHostToDevice); free(padded_hdata); padded_hdata = NULL; // **===--------------- Allocate data structure here --------------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_odata, d_idata, TILE_SIZE); // Run the prescan cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // **===-------------- Modify the body of this function -----------===** prescanArray(d_odata, d_idata, padded_num_elements); // 
**===-----------------------------------------------------------===** cudaEventRecord(stop); cudaEventSynchronize(stop); float device_ms = 0; cudaEventElapsedTime(&device_ms, start, stop); printf("GPU Processing time: %f (ms)\n", device_ms); printf("Speedup: %fX\n", host_ms/device_ms); // **===--------------- Deallocate data structure here ------------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // copy result from device to host cudaMemcpy( h_data, d_odata, sizeof(unsigned int) * num_elements, cudaMemcpyDeviceToHost); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile(h_data, argv[1], num_elements); } // Check if the result is equivalent to the expected soluion unsigned int result_regtest = CompareArrays( reference, h_data, num_elements); //for(unsigned int i = 0; i < num_elements; i++){ //printf("reference[%u] = %d, h_data[%u] = %d\n", i, reference[i], i, h_data[i]); //} printf( "Test %s\n", (1 == result_regtest) ? 
"PASSED" : "FAILED"); // cleanup memory free(h_data); free(reference); cudaFree( d_odata); cudaFree( d_idata); } // Read array in from file int ReadFile(unsigned int* arr, char* file_name, int num_elements) { FILE* input = fopen(file_name, "r"); if (input == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_elements; i++) fscanf(input, "%u", &(arr[i])); return num_elements; } // Read params of input matrices int ReadParamsFile(int* params, char* file_name, int num_params) { FILE* input = fopen(file_name, "r"); if (input == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_params; i++) fscanf(input, "%u", &(params[i])); return num_params; } // Write a 16x16 matrix to file void WriteFile(unsigned int* arr, char* file_name, int num_elements) { FILE* output = fopen(file_name, "w"); if (output == NULL) { printf("Error opening file %s\n", file_name); exit(1); } for (unsigned i = 0; i < num_elements; i++) { fprintf(output, "%f ", arr[i]); } } // returns true iff A and B have same elements in same order bool CompareArrays(unsigned int *A, unsigned int *B, int size) { for (unsigned i = 0; i < size; i++) if (fabs(A[i] - B[i]) > 0) return false; return true; }
da667c17f9b4b3c14086b5799be98c66c88260b9.hip
// !!! This is a file automatically generated by hipify!!! // // CasAES_CUDA.c // CasAES_CUDA // Created by Carter McCardwell on 11/11/14. // #include <stdint.h> #include <stdio.h> #include <time.h> #include <string.h> #include <hip/hip_runtime.h> const int Nb_h = 4; const int Nr_h = 14; const int Nk_h = 8; const uint8_t s_h[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; uint8_t Rcon_h[256] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 
0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d }; __constant__ uint8_t s[256]; __constant__ int Nb; __constant__ int Nr; __constant__ int Nk; __constant__ uint32_t ek[60]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void cudaDevAssist(hipError_t code, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", hipGetErrorString(code), line); if (abort) exit(code); } } uint32_t sw(uint32_t word) { union { uint32_t word; uint8_t bytes[4]; } subWord __attribute__ ((aligned)); subWord.word = word; subWord.bytes[3] = s_h[subWord.bytes[3]]; 
subWord.bytes[2] = s_h[subWord.bytes[2]]; subWord.bytes[1] = s_h[subWord.bytes[1]]; subWord.bytes[0] = s_h[subWord.bytes[0]]; return subWord.word; } __device__ void sb(uint8_t* in) { for (int i = 0; i < 32; i++) { in[i] = s[in[i]]; } } __device__ void sb_st(uint8_t* in) { for (int i = 0; i < 16; i++) { in[i] = s[in[i]]; } } __device__ void mc(uint8_t* arr) { for (int i = 0; i < 4; i++) { uint8_t a[4]; uint8_t b[4]; uint8_t c; uint8_t h; for(c=0;c<4;c++) { a[c] = arr[(4*c+i)]; h = (uint8_t)((signed char)arr[(4*c+i)] >> 7); b[c] = arr[(4*c+i)] << 1; b[c] ^= 0x1B & h; } arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1]; arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2]; arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3]; arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0]; } } __device__ void sr(uint8_t* arr) { uint8_t out[16]; //On per-row basis (+1 shift ea row) //Row 1 out[0] = arr[0]; out[1] = arr[1]; out[2] = arr[2]; out[3] = arr[3]; //Row 2 out[4] = arr[5]; out[5] = arr[6]; out[6] = arr[7]; out[7] = arr[4]; //Row 3 out[8] = arr[10]; out[9] = arr[11]; out[10] = arr[8]; out[11] = arr[9]; //Row 4 out[12] = arr[15]; out[13] = arr[12]; out[14] = arr[13]; out[15] = arr[14]; for (int i = 0; i < 16; i++) { arr[i] = out[i]; } } uint32_t rw(uint32_t word) { union { uint8_t bytes[4]; uint32_t word; } subWord __attribute__ ((aligned)); subWord.word = word; uint8_t B0 = subWord.bytes[3], B1 = subWord.bytes[2], B2 = subWord.bytes[1], B3 = subWord.bytes[0]; subWord.bytes[3] = B1; //0 subWord.bytes[2] = B2; //1 subWord.bytes[1] = B3; //2 subWord.bytes[0] = B0; //3 return subWord.word; } void K_Exp(uint8_t* pk, uint32_t* out) { int i = 0; union { uint8_t bytes[4]; uint32_t word; } temp __attribute__ ((aligned)); union { uint8_t bytes[4]; uint32_t word; } univar[60] __attribute__ ((aligned)); for (i = 0; i < Nk_h; i++) { univar[i].bytes[3] = pk[i*4]; univar[i].bytes[2] = pk[i*4+1]; univar[i].bytes[1] = pk[i*4+2]; univar[i].bytes[0] = pk[i*4+3]; } for (i = Nk_h; i < Nb_h*(Nr_h+1); i++) { temp.word = 
univar[i-1].word; if (i % Nk_h == 0) { temp.word = (sw(rw(temp.word))); temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]); } else if (Nk_h > 6 && i % Nk_h == 4) { temp.word = sw(temp.word); } if (i-4 % Nk_h == 0) { temp.word = sw(temp.word); } univar[i].word = univar[i-Nk_h].word ^ temp.word; } for (i = 0; i < 60; i++) { out[i] = univar[i].word; } } __device__ void ark(uint8_t* state, int strD, uint32_t* eK) { union { uint32_t word; uint8_t bytes[4]; } kb[4] __attribute__ ((aligned)); kb[0].word = eK[strD]; kb[1].word = eK[strD+1]; kb[2].word = eK[strD+2]; kb[3].word = eK[strD+3]; for (int i = 0; i < 4; i++) { state[i] = state[i] ^ kb[i].bytes[3]; state[i+4] = state[i+4] ^ kb[i].bytes[2]; state[i+8] = state[i+8] ^ kb[i].bytes[1]; state[i+12] = state[i+12] ^ kb[i].bytes[0]; } } __global__ void cudaRunner(uint8_t *in) { uint8_t state[16]; int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker for (int i = 0; i < 16; i++) { state[i] = in[(localid*16)+i]; } ark(state, 0, ek); for (int i = 1; i < 14; i++) { sb_st(state); sr(state); mc(state); ark(state, i*Nb, ek); } sb_st(state); sr(state); ark(state, Nr*Nb, ek); for (int i = 0; i < 16; i++) { in[(localid*16)+i] = state[i]; } } int main(int argc, const char * argv[]) { printf("CasAES_CUDA Hyperthreaded AES-256 Encryption for CUDA processors - compiled 3/25/2015 Rev. 
4\nCarter McCardwell, Northeastern University NUCAR - http://coe.neu.edu/~cmccardw - mccardwell.net\nPlease Wait...\n"); clock_t c_start, c_stop; c_start = clock(); FILE *infile; FILE *keyfile; FILE *outfile; infile = fopen(argv[2], "r"); if (infile == NULL) { printf("error (infile)\n"); return(1); } keyfile = fopen(argv[3], "rb"); if (keyfile == NULL) { printf("error (keyfile)\n"); return(1); } outfile = fopen(argv[4], "w"); if (outfile == NULL) { printf("error (outfile permission error, run with sudo)\n"); return(1); } //Hex info, or ASCII bool hexMode = false; if (strcmp(argv[1], "h") == 0) { hexMode = true; } else if (strcmp(argv[1], "a") == 0) { hexMode = false; } else { printf("error: first argument must be \'a\' for ASCII interpretation or \'h\' for hex interpretation\n"); return(1); } uint8_t key[32]; uint32_t ek_h[60]; for (int i = 0; i < 32; i++) { fscanf(keyfile, "%x", &key[i]); } K_Exp(key, ek_h); //send constants to GPU hipSetDevice(0); cudaDevAssist(hipMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, hipMemcpyHostToDevice), 535, true); cudaDevAssist(hipMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, hipMemcpyHostToDevice), 543, true); cudaDevAssist(hipMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, hipMemcpyHostToDevice), 903, true); cudaDevAssist(hipMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, hipMemcpyHostToDevice), 920, true); cudaDevAssist(hipMemcpyToSymbol(ek, &ek_h, 60*sizeof(uint32_t), 0, hipMemcpyHostToDevice), 823, true); hipDeviceSynchronize(); const int BLOCKS = -1; //Not used const int RUNNING_THREADS = 512; uint8_t *devState = NULL; cudaDevAssist(hipMalloc((void**)&devState, RUNNING_THREADS*16*sizeof(uint8_t)), 425, true); uint8_t states[RUNNING_THREADS][16] = { 0x00 }; int ch = 0; int spawn = 0; int end = 1; while (end) { spawn = 0; for (int i = 0; i < RUNNING_THREADS; i++) //Dispatch many control threads that will report back to main (for now 5x) - 1 worker per state { spawn++; for (int ix = 0; ix < 16; ix++) { if (hexMode) { if (fscanf(infile, "%x", 
&states[i][ix]) != EOF) { ; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } else { ch = getc(infile); if (ch != EOF) { states[i][ix] = ch; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } } } //arrange data correctly for (int i = 0; i < spawn; i++) { uint8_t temp[16]; memcpy(&temp[0], &states[i][0], sizeof(uint8_t)); memcpy(&temp[4], &states[i][1], sizeof(uint8_t)); memcpy(&temp[8], &states[i][2], sizeof(uint8_t)); memcpy(&temp[12], &states[i][3], sizeof(uint8_t)); memcpy(&temp[1], &states[i][4], sizeof(uint8_t)); memcpy(&temp[5], &states[i][5], sizeof(uint8_t)); memcpy(&temp[9], &states[i][6], sizeof(uint8_t)); memcpy(&temp[13], &states[i][7], sizeof(uint8_t)); memcpy(&temp[2], &states[i][8], sizeof(uint8_t)); memcpy(&temp[6], &states[i][9], sizeof(uint8_t)); memcpy(&temp[10], &states[i][10], sizeof(uint8_t)); memcpy(&temp[14], &states[i][11], sizeof(uint8_t)); memcpy(&temp[3], &states[i][12], sizeof(uint8_t)); memcpy(&temp[7], &states[i][13], sizeof(uint8_t)); memcpy(&temp[11], &states[i][14], sizeof(uint8_t)); memcpy(&temp[15], &states[i][15], sizeof(uint8_t)); for (int c = 0; c < 16; c++) { memcpy(&states[i][c], &temp[c], sizeof(uint8_t)); } } //printf("\nCycle!: Spawn = %i", spawn); cudaDevAssist(hipMemcpy(devState, *states, spawn*16*sizeof(uint8_t), hipMemcpyHostToDevice), 426, true); cudaDevAssist(hipDeviceSynchronize(), 268, true); hipLaunchKernelGGL(( cudaRunner), dim3(1),dim3(spawn), 0, 0, devState); cudaDevAssist(hipDeviceSynchronize(), 270, true); cudaDevAssist(hipMemcpy(*states, devState, spawn*16*sizeof(uint8_t), hipMemcpyDeviceToHost), 431, true); //Write results to out for (int i = 0; i < spawn; i++) { for (int ix = 0; ix < 4; ix++) { char hex[3]; sprintf(hex, "%02x", states[i][ix]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } 
sprintf(hex, "%02x", states[i][ix+4]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+8]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+12]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } } } } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("Done - Time taken: %f ms\n", diff); hipFree(devState); hipDeviceReset(); fclose(infile); fclose(outfile); fclose(keyfile); return 0; }
da667c17f9b4b3c14086b5799be98c66c88260b9.cu
// // CasAES_CUDA.c // CasAES_CUDA // Created by Carter McCardwell on 11/11/14. // #include <stdint.h> #include <stdio.h> #include <time.h> #include <string.h> #include <cuda_runtime.h> const int Nb_h = 4; const int Nr_h = 14; const int Nk_h = 8; const uint8_t s_h[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; uint8_t Rcon_h[256] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 
0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d }; __constant__ uint8_t s[256]; __constant__ int Nb; __constant__ int Nr; __constant__ int Nk; __constant__ uint32_t ek[60]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void cudaDevAssist(cudaError_t code, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line); if (abort) exit(code); } } uint32_t sw(uint32_t word) { union { uint32_t word; uint8_t bytes[4]; } subWord __attribute__ ((aligned)); subWord.word = word; subWord.bytes[3] = s_h[subWord.bytes[3]]; subWord.bytes[2] = s_h[subWord.bytes[2]]; subWord.bytes[1] = 
s_h[subWord.bytes[1]]; subWord.bytes[0] = s_h[subWord.bytes[0]]; return subWord.word; } __device__ void sb(uint8_t* in) { for (int i = 0; i < 32; i++) { in[i] = s[in[i]]; } } __device__ void sb_st(uint8_t* in) { for (int i = 0; i < 16; i++) { in[i] = s[in[i]]; } } __device__ void mc(uint8_t* arr) { for (int i = 0; i < 4; i++) { uint8_t a[4]; uint8_t b[4]; uint8_t c; uint8_t h; for(c=0;c<4;c++) { a[c] = arr[(4*c+i)]; h = (uint8_t)((signed char)arr[(4*c+i)] >> 7); b[c] = arr[(4*c+i)] << 1; b[c] ^= 0x1B & h; } arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1]; arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2]; arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3]; arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0]; } } __device__ void sr(uint8_t* arr) { uint8_t out[16]; //On per-row basis (+1 shift ea row) //Row 1 out[0] = arr[0]; out[1] = arr[1]; out[2] = arr[2]; out[3] = arr[3]; //Row 2 out[4] = arr[5]; out[5] = arr[6]; out[6] = arr[7]; out[7] = arr[4]; //Row 3 out[8] = arr[10]; out[9] = arr[11]; out[10] = arr[8]; out[11] = arr[9]; //Row 4 out[12] = arr[15]; out[13] = arr[12]; out[14] = arr[13]; out[15] = arr[14]; for (int i = 0; i < 16; i++) { arr[i] = out[i]; } } uint32_t rw(uint32_t word) { union { uint8_t bytes[4]; uint32_t word; } subWord __attribute__ ((aligned)); subWord.word = word; uint8_t B0 = subWord.bytes[3], B1 = subWord.bytes[2], B2 = subWord.bytes[1], B3 = subWord.bytes[0]; subWord.bytes[3] = B1; //0 subWord.bytes[2] = B2; //1 subWord.bytes[1] = B3; //2 subWord.bytes[0] = B0; //3 return subWord.word; } void K_Exp(uint8_t* pk, uint32_t* out) { int i = 0; union { uint8_t bytes[4]; uint32_t word; } temp __attribute__ ((aligned)); union { uint8_t bytes[4]; uint32_t word; } univar[60] __attribute__ ((aligned)); for (i = 0; i < Nk_h; i++) { univar[i].bytes[3] = pk[i*4]; univar[i].bytes[2] = pk[i*4+1]; univar[i].bytes[1] = pk[i*4+2]; univar[i].bytes[0] = pk[i*4+3]; } for (i = Nk_h; i < Nb_h*(Nr_h+1); i++) { temp.word = univar[i-1].word; if (i % Nk_h == 0) { temp.word = 
(sw(rw(temp.word))); temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]); } else if (Nk_h > 6 && i % Nk_h == 4) { temp.word = sw(temp.word); } if (i-4 % Nk_h == 0) { temp.word = sw(temp.word); } univar[i].word = univar[i-Nk_h].word ^ temp.word; } for (i = 0; i < 60; i++) { out[i] = univar[i].word; } } __device__ void ark(uint8_t* state, int strD, uint32_t* eK) { union { uint32_t word; uint8_t bytes[4]; } kb[4] __attribute__ ((aligned)); kb[0].word = eK[strD]; kb[1].word = eK[strD+1]; kb[2].word = eK[strD+2]; kb[3].word = eK[strD+3]; for (int i = 0; i < 4; i++) { state[i] = state[i] ^ kb[i].bytes[3]; state[i+4] = state[i+4] ^ kb[i].bytes[2]; state[i+8] = state[i+8] ^ kb[i].bytes[1]; state[i+12] = state[i+12] ^ kb[i].bytes[0]; } } __global__ void cudaRunner(uint8_t *in) { uint8_t state[16]; int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker for (int i = 0; i < 16; i++) { state[i] = in[(localid*16)+i]; } ark(state, 0, ek); for (int i = 1; i < 14; i++) { sb_st(state); sr(state); mc(state); ark(state, i*Nb, ek); } sb_st(state); sr(state); ark(state, Nr*Nb, ek); for (int i = 0; i < 16; i++) { in[(localid*16)+i] = state[i]; } } int main(int argc, const char * argv[]) { printf("CasAES_CUDA Hyperthreaded AES-256 Encryption for CUDA processors - compiled 3/25/2015 Rev. 
4\nCarter McCardwell, Northeastern University NUCAR - http://coe.neu.edu/~cmccardw - mccardwell.net\nPlease Wait...\n"); clock_t c_start, c_stop; c_start = clock(); FILE *infile; FILE *keyfile; FILE *outfile; infile = fopen(argv[2], "r"); if (infile == NULL) { printf("error (infile)\n"); return(1); } keyfile = fopen(argv[3], "rb"); if (keyfile == NULL) { printf("error (keyfile)\n"); return(1); } outfile = fopen(argv[4], "w"); if (outfile == NULL) { printf("error (outfile permission error, run with sudo)\n"); return(1); } //Hex info, or ASCII bool hexMode = false; if (strcmp(argv[1], "h") == 0) { hexMode = true; } else if (strcmp(argv[1], "a") == 0) { hexMode = false; } else { printf("error: first argument must be \'a\' for ASCII interpretation or \'h\' for hex interpretation\n"); return(1); } uint8_t key[32]; uint32_t ek_h[60]; for (int i = 0; i < 32; i++) { fscanf(keyfile, "%x", &key[i]); } K_Exp(key, ek_h); //send constants to GPU cudaSetDevice(0); cudaDevAssist(cudaMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true); cudaDevAssist(cudaMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true); cudaDevAssist(cudaMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true); cudaDevAssist(cudaMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true); cudaDevAssist(cudaMemcpyToSymbol(ek, &ek_h, 60*sizeof(uint32_t), 0, cudaMemcpyHostToDevice), 823, true); cudaThreadSynchronize(); const int BLOCKS = -1; //Not used const int RUNNING_THREADS = 512; uint8_t *devState = NULL; cudaDevAssist(cudaMalloc((void**)&devState, RUNNING_THREADS*16*sizeof(uint8_t)), 425, true); uint8_t states[RUNNING_THREADS][16] = { 0x00 }; int ch = 0; int spawn = 0; int end = 1; while (end) { spawn = 0; for (int i = 0; i < RUNNING_THREADS; i++) //Dispatch many control threads that will report back to main (for now 5x) - 1 worker per state { spawn++; for (int ix = 0; ix < 16; ix++) { if (hexMode) { if 
(fscanf(infile, "%x", &states[i][ix]) != EOF) { ; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } else { ch = getc(infile); if (ch != EOF) { states[i][ix] = ch; } else { if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } } else { spawn--; } i = RUNNING_THREADS + 1; end = 0; break; } } } } //arrange data correctly for (int i = 0; i < spawn; i++) { uint8_t temp[16]; memcpy(&temp[0], &states[i][0], sizeof(uint8_t)); memcpy(&temp[4], &states[i][1], sizeof(uint8_t)); memcpy(&temp[8], &states[i][2], sizeof(uint8_t)); memcpy(&temp[12], &states[i][3], sizeof(uint8_t)); memcpy(&temp[1], &states[i][4], sizeof(uint8_t)); memcpy(&temp[5], &states[i][5], sizeof(uint8_t)); memcpy(&temp[9], &states[i][6], sizeof(uint8_t)); memcpy(&temp[13], &states[i][7], sizeof(uint8_t)); memcpy(&temp[2], &states[i][8], sizeof(uint8_t)); memcpy(&temp[6], &states[i][9], sizeof(uint8_t)); memcpy(&temp[10], &states[i][10], sizeof(uint8_t)); memcpy(&temp[14], &states[i][11], sizeof(uint8_t)); memcpy(&temp[3], &states[i][12], sizeof(uint8_t)); memcpy(&temp[7], &states[i][13], sizeof(uint8_t)); memcpy(&temp[11], &states[i][14], sizeof(uint8_t)); memcpy(&temp[15], &states[i][15], sizeof(uint8_t)); for (int c = 0; c < 16; c++) { memcpy(&states[i][c], &temp[c], sizeof(uint8_t)); } } //printf("\nCycle!: Spawn = %i", spawn); cudaDevAssist(cudaMemcpy(devState, *states, spawn*16*sizeof(uint8_t), cudaMemcpyHostToDevice), 426, true); cudaDevAssist(cudaDeviceSynchronize(), 268, true); cudaRunner<<<1,spawn>>>(devState); cudaDevAssist(cudaDeviceSynchronize(), 270, true); cudaDevAssist(cudaMemcpy(*states, devState, spawn*16*sizeof(uint8_t), cudaMemcpyDeviceToHost), 431, true); //Write results to out for (int i = 0; i < spawn; i++) { for (int ix = 0; ix < 4; ix++) { char hex[3]; sprintf(hex, "%02x", states[i][ix]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, 
"%02x", states[i][ix+4]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+8]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } sprintf(hex, "%02x", states[i][ix+12]); for (int i = 0; i < 3; i++) { putc(hex[i], outfile); } } } } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("Done - Time taken: %f ms\n", diff); cudaFree(devState); cudaDeviceReset(); fclose(infile); fclose(outfile); fclose(keyfile); return 0; }
7b3c399ad7342b88720415090b912eb7c30ba869.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <THH/THHAtomics.cuh> #include <stdio.h> #include "cuda_helpers.h" template <typename T> __device__ T bilinear_interpolate( const T* input, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void PSROIAlignForwardCUDA( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* rois, const int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * 
pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void PSROIAlignBackwardCUDA( const int nthreads, const T* grad_output, const int* channel_mapping, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = 
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(grad_input_offset + y_low * width + x_low, g1); atomicAdd(grad_input_offset + y_low * width + x_high, g2); atomicAdd(grad_input_offset + y_high * width + x_low, g3); atomicAdd(grad_input_offset + y_high * width + x_high, g4); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "PSROIAlign_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = 
input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, channel_mapping); } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "PSROIAlign_forward", [&] { hipLaunchKernelGGL(( PSROIAlignForwardCUDA<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(hipGetLastError()); hipDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor PSROIAlign_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; 
at::CheckedFrom c = "PSROIAlign_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "PSROIAlign_backward", [&] { hipLaunchKernelGGL(( PSROIAlignBackwardCUDA<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; }
7b3c399ad7342b88720415090b912eb7c30ba869.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <THC/THCAtomics.cuh> #include <stdio.h> #include "cuda_helpers.h" template <typename T> __device__ T bilinear_interpolate( const T* input, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void PSROIAlignForwardCUDA( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* rois, const int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois 
+ n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > 
width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void PSROIAlignBackwardCUDA( const int nthreads, const T* grad_output, const int* channel_mapping, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = 
channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(grad_input_offset + y_low * width + x_low, g1); atomicAdd(grad_input_offset + y_low * width + x_high, g2); atomicAdd(grad_input_offset + y_high * width + x_low, g3); atomicAdd(grad_input_offset + y_high * width + x_high, g4); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, 
"Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "PSROIAlign_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, channel_mapping); } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "PSROIAlign_forward", [&] { PSROIAlignForwardCUDA<scalar_t><<<grid, block, 0, stream>>>( output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); cudaDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor PSROIAlign_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio, const int batch_size, const int channels, const int height, const int width) { // Check 
if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "PSROIAlign_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "PSROIAlign_backward", [&] { PSROIAlignBackwardCUDA<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; }
2d198f2463070dcd2d9b9991c495f7094c33d685.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************** By Huahua Wang, the University of Minnesota, twin cities ***********************************************************/ #include "badmm_kernel.cuh" __global__ void vecInit(float* X, unsigned int size, float value) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { X[i] = value; } } __global__ void xexp( float* X, float* C, float* Y, float* Z, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { X[i] = Z[i]*__expf(C[i] - Y[i]); } } __global__ void zexp( float* Z, float* X, float* Y, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { Z[i] = X[i]*__expf(Y[i]); } } __global__ void rowNorm( float* X, float* v, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int row; for (unsigned long int i = idx; i < size; i += stride) { row = (int)i/n; X[i] /= v[row]; } } __global__ void colNorm( float* X, float* v, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { col = (int)i%n; X[i] /= v[col]; } } __global__ void dual( float* err, float* Y, float* X, float* Z, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; float temp; err[idx] = 0.0; for (unsigned int i = idx; i < 
size; i += stride) { temp = X[i] - Z[i]; Y[i] += temp; err[idx] += temp*temp; } // __syncthreads(); } __global__ void matsub( float* temp, float* X, float* Y, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { temp[i]= X[i] -Y[i]; } } __global__ void rowNorm_a( float* X, float* v, float* a, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int row; for (unsigned long int i = idx; i < size; i += stride) { row = (int)i/n; X[i] /= v[row]*a[row]; } } __global__ void colNorm_b( float* X, float* v, float* b, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { col = (int)i%n; X[i] /= v[col]*b[col]; } } __global__ void reduce(float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } // avoid bank conflict if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void Mean_Xs( float* d_X, float* d_X1, float* d_X2, float* d_X3, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { d_X[i] = (float)(d_X1[i] + d_X2[i] + d_X3[i])/3; } } __global__ void B_update( float* d_B, float* d_Xmean, int N , float* d_A, float* d_U, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { d_B[i] = d_Xmean[i] - (float)(d_A[i]/N) + d_U[i]; } } __global__ void X1_update( float* d_X_1, float* d_B, float lambda, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { d_X_1[i] = (float)(1 /(1+ lambda)) * (d_X_1[i] - d_B[i]); } } // this is prox_l1 of X2 , this can be used to update X3, with necessary updates. 
__global__ void X2_update( float* d_X_2, float* d_B, float lambda_g2, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { if( d_B != NULL ) d_B[i] = 0.0f; float temp = (float)(d_X_2[i] - d_B[i]); d_X_2[i] = ( (float)(temp -lambda_g2) < 0 ? 0 : (temp -lambda_g2) ) - (float)(-temp -lambda_g2) < 0 ? 0 : (-temp -lambda_g2); } } __global__ void concat_X( float* d_X, float* d_X1, float* d_X2, float* d_X3, int N, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size*N; i += stride) { d_X[i] = (i< size) ? d_X1[i] : (i>=size && i < size*2) ? d_X2[i-size] : d_X3[i-2*size] ; } } // svd alag se calculate karna call the prox_l1 then __global__ void X3_update( float* d_X_3_Minus_B, float* d_svd_U, float* d_svd_S, float* d_svd_VH, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { //temp[i] = (float)(d_X_3[i] - d_B[i]); } }
2d198f2463070dcd2d9b9991c495f7094c33d685.cu
/*********************************************************** By Huahua Wang, the University of Minnesota, twin cities ***********************************************************/ #include "badmm_kernel.cuh" __global__ void vecInit(float* X, unsigned int size, float value) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { X[i] = value; } } __global__ void xexp( float* X, float* C, float* Y, float* Z, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { X[i] = Z[i]*__expf(C[i] - Y[i]); } } __global__ void zexp( float* Z, float* X, float* Y, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { Z[i] = X[i]*__expf(Y[i]); } } __global__ void rowNorm( float* X, float* v, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int row; for (unsigned long int i = idx; i < size; i += stride) { row = (int)i/n; X[i] /= v[row]; } } __global__ void colNorm( float* X, float* v, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { col = (int)i%n; X[i] /= v[col]; } } __global__ void dual( float* err, float* Y, float* X, float* Z, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; float temp; err[idx] = 0.0; for (unsigned int i = idx; i < size; i += stride) { temp = X[i] - Z[i]; Y[i] += temp; err[idx] += temp*temp; } // 
__syncthreads(); } __global__ void matsub( float* temp, float* X, float* Y, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { temp[i]= X[i] -Y[i]; } } __global__ void rowNorm_a( float* X, float* v, float* a, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int row; for (unsigned long int i = idx; i < size; i += stride) { row = (int)i/n; X[i] /= v[row]*a[row]; } } __global__ void colNorm_b( float* X, float* v, float* b, unsigned int size, unsigned int n) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { col = (int)i%n; X[i] /= v[col]*b[col]; } } __global__ void reduce(float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } // avoid bank conflict if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void Mean_Xs( float* d_X, float* d_X1, float* d_X2, float* d_X3, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; unsigned int col; for (unsigned long int i = idx; i < size; i += stride) { d_X[i] = (float)(d_X1[i] + d_X2[i] + d_X3[i])/3; } } __global__ void B_update( float* d_B, float* d_Xmean, int N , float* d_A, float* d_U, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { d_B[i] = d_Xmean[i] - (float)(d_A[i]/N) + d_U[i]; } } __global__ void X1_update( float* d_X_1, float* d_B, float lambda, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { d_X_1[i] = (float)(1 /(1+ lambda)) * (d_X_1[i] - d_B[i]); } } // this is prox_l1 of X2 , this can be used to update X3, with necessary updates. 
__global__ void X2_update( float* d_X_2, float* d_B, float lambda_g2, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { if( d_B != NULL ) d_B[i] = 0.0f; float temp = (float)(d_X_2[i] - d_B[i]); d_X_2[i] = ( (float)(temp -lambda_g2) < 0 ? 0 : (temp -lambda_g2) ) - (float)(-temp -lambda_g2) < 0 ? 0 : (-temp -lambda_g2); } } __global__ void concat_X( float* d_X, float* d_X1, float* d_X2, float* d_X3, int N, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size*N; i += stride) { d_X[i] = (i< size) ? d_X1[i] : (i>=size && i < size*2) ? d_X2[i-size] : d_X3[i-2*size] ; } } // svd alag se calculate karna call the prox_l1 then __global__ void X3_update( float* d_X_3_Minus_B, float* d_svd_U, float* d_svd_S, float* d_svd_VH, unsigned int size ) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned long int i = idx; i < size; i += stride) { //temp[i] = (float)(d_X_3[i] - d_B[i]); } }
0e01b7fa87bb5c8e11adc1871da317aa04126fde.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> __host__ void read(float *M, FILE *source, int rows, int cols){ for (int i = 0; i < rows; ++i){ for (int j = 0; j < cols; ++j){ fscanf(source, "%f,", &M[i * cols + j]); } } fclose(source); return; } __host__ void print(float *M, int rows, int cols){ printf("\n"); printf("----------------------------------------\n"); for(int i = 0; i < rows; i++) { for(int j = 0; j < cols; j++) { printf("%.2f ", M[i * cols + j]); } printf("\n"); } printf("----------------------------------------\n"); printf("\n"); return; } __global__ void MatrixMultiplyKernel(float *d_A, float *d_B, float *d_R, int colsA, int rowsA, int colsB, int rowsB){ int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if((row < rowsA) && (col < colsB)){ float cont = 0.0; for (int k = 0; k < rowsB; ++k){ cont += d_A[row * colsA + k] * d_B[k * colsB + col]; } d_R[row * colsB + col] = cont; } return; } int main(int argc, char** argv) { clock_t start, end; if (argc != 3){ printf("Debe aadir los nombres de los archivos\n"); return 1; } float *h_A, *h_B, *h_R; int rowsA, rowsB, colsA, colsB; hipError_t error = hipSuccess; FILE *file_1, *file_2; file_1 = fopen(argv[1], "r"); file_2 = fopen(argv[2], "r"); fscanf(file_1, "%d", &rowsA); fscanf(file_1, "%d", &colsA); fscanf(file_2, "%d", &rowsB); fscanf(file_2, "%d", &colsB); if (colsA != rowsB){ printf("Es imposible multiplicar las matrices\n"); return 1; } float sizeA = rowsA * colsA * sizeof(float); float sizeB = rowsB * colsB * sizeof(float); float sizeR = rowsA * colsB * sizeof(float); h_A = (float*)malloc(sizeA); h_B = (float*)malloc(sizeB); h_R = (float*)malloc(sizeR); read(h_A, file_1, rowsA, colsA); read(h_B, file_2, rowsB, colsB); float *d_A, *d_B, *d_R; start = clock(); error = hipMalloc((void**)&d_A, sizeA); if (error != hipSuccess){ printf("Error solicitando memoria para d_A \n"); return 
1; } error = hipMalloc((void**)&d_B, sizeB); if (error != hipSuccess){ printf("Error solicitando memoria para d_B \n"); return 1; } error = hipMalloc((void**)&d_R, sizeR); if (error != hipSuccess){ printf("Error solicitando memoria para d_R \n"); return 1; } hipMemcpy(d_A, h_A, sizeA, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizeB, hipMemcpyHostToDevice); int blockSize = 32; dim3 dimGrid(ceil((colsB) / float(blockSize)), ceil((rowsA)/ float(blockSize)), 1); dim3 dimBlock(blockSize, blockSize, 1); hipLaunchKernelGGL(( MatrixMultiplyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_R, colsA, rowsA, colsB, rowsB); hipMemcpy(h_R, d_R, sizeR, hipMemcpyDeviceToHost); end = clock(); printf("Tiempo : %.6f\n", (double)(end - start)/CLOCKS_PER_SEC); print(h_A, rowsA, colsA); print(h_B, rowsB, colsB); print(h_R, rowsA, colsB); free(h_A); free(h_B); free(h_R); hipFree(d_A); hipFree(d_B); hipFree(d_R); /* code */ return 0; }
0e01b7fa87bb5c8e11adc1871da317aa04126fde.cu
#include <stdlib.h> #include <stdio.h> #include <cuda.h> __host__ void read(float *M, FILE *source, int rows, int cols){ for (int i = 0; i < rows; ++i){ for (int j = 0; j < cols; ++j){ fscanf(source, "%f,", &M[i * cols + j]); } } fclose(source); return; } __host__ void print(float *M, int rows, int cols){ printf("\n"); printf("----------------------------------------\n"); for(int i = 0; i < rows; i++) { for(int j = 0; j < cols; j++) { printf("%.2f ", M[i * cols + j]); } printf("\n"); } printf("----------------------------------------\n"); printf("\n"); return; } __global__ void MatrixMultiplyKernel(float *d_A, float *d_B, float *d_R, int colsA, int rowsA, int colsB, int rowsB){ int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; if((row < rowsA) && (col < colsB)){ float cont = 0.0; for (int k = 0; k < rowsB; ++k){ cont += d_A[row * colsA + k] * d_B[k * colsB + col]; } d_R[row * colsB + col] = cont; } return; } int main(int argc, char** argv) { clock_t start, end; if (argc != 3){ printf("Debe añadir los nombres de los archivos\n"); return 1; } float *h_A, *h_B, *h_R; int rowsA, rowsB, colsA, colsB; cudaError_t error = cudaSuccess; FILE *file_1, *file_2; file_1 = fopen(argv[1], "r"); file_2 = fopen(argv[2], "r"); fscanf(file_1, "%d", &rowsA); fscanf(file_1, "%d", &colsA); fscanf(file_2, "%d", &rowsB); fscanf(file_2, "%d", &colsB); if (colsA != rowsB){ printf("Es imposible multiplicar las matrices\n"); return 1; } float sizeA = rowsA * colsA * sizeof(float); float sizeB = rowsB * colsB * sizeof(float); float sizeR = rowsA * colsB * sizeof(float); h_A = (float*)malloc(sizeA); h_B = (float*)malloc(sizeB); h_R = (float*)malloc(sizeR); read(h_A, file_1, rowsA, colsA); read(h_B, file_2, rowsB, colsB); float *d_A, *d_B, *d_R; start = clock(); error = cudaMalloc((void**)&d_A, sizeA); if (error != cudaSuccess){ printf("Error solicitando memoria para d_A \n"); return 1; } error = cudaMalloc((void**)&d_B, sizeB); if (error != 
cudaSuccess){ printf("Error solicitando memoria para d_B \n"); return 1; } error = cudaMalloc((void**)&d_R, sizeR); if (error != cudaSuccess){ printf("Error solicitando memoria para d_R \n"); return 1; } cudaMemcpy(d_A, h_A, sizeA, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizeB, cudaMemcpyHostToDevice); int blockSize = 32; dim3 dimGrid(ceil((colsB) / float(blockSize)), ceil((rowsA)/ float(blockSize)), 1); dim3 dimBlock(blockSize, blockSize, 1); MatrixMultiplyKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_R, colsA, rowsA, colsB, rowsB); cudaMemcpy(h_R, d_R, sizeR, cudaMemcpyDeviceToHost); end = clock(); printf("Tiempo : %.6f\n", (double)(end - start)/CLOCKS_PER_SEC); print(h_A, rowsA, colsA); print(h_B, rowsB, colsB); print(h_R, rowsA, colsB); free(h_A); free(h_B); free(h_R); cudaFree(d_A); cudaFree(d_B); cudaFree(d_R); /* code */ return 0; }
0ff1dab54531f875d0febe57bdbeaf706d674788.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 5 #define M 10 //global means it is called by host, run by device //mat is the original matrix *already allocated on GPU* //mat_res is the matrix to store the result *already allocated on GPU* //s is the scalar, passed directly from host to function __global__ void mat_mult(int *mat, int *mat_res, int *mult) { //row int tidX = blockIdx.x * blockDim.x + threadIdx.x; //col int tidY = blockIdx.y * blockDim.y + threadIdx.y; //thread ID must be < # of matrix rows and columns if(tidX < M && tidY < N) mat_res[tidX * N + tidY] = mat[tidX * N + tidY] * mult[tidY]; } //__host__ is default (called and run on host), so this is optional __host__ int main() { //host stuff int *mat = (int *) malloc(N * M * sizeof(int)); int *mat_res = (int *) malloc(N * M * sizeof(int)); int *mult = (int *) malloc(N * sizeof(int)); int *mult_res = (int *) malloc(M * sizeof(int)); //device stuff int *d_mat, *d_mat_res, *d_mult, *d_mult_res; printf("Past Pointer Var Dec\n"); //fill host matrix int i, j; for(i = 0; i < M; i++) for(j = 0; j < N; j++) mat[i * M + j] = i * N + j; for(i = 0; i < N; i++) mult[i] = 20 + i; printf("Original matrix...\n"); for(i = 0; i < M; i++) { for(j = 0; j < N; j++) printf("%d\t", mat[i * M + j]); printf("\n"); } printf("Allocating CUDA memory\n"); //allocate device memory hipMalloc((void **) &d_mat,N * M * sizeof(int)); hipMalloc((void **) &d_mat_res, N * M * sizeof(int)); printf("1\n"); hipMalloc((void **) &d_mult, N * sizeof(int)); hipMalloc((void **) &d_mult_res, M * sizeof(int)); //copy host matrix to device printf("Copying to device...\n"); hipMemcpy(d_mat, mat, N * M * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_mult, mult, N * sizeof(int), hipMemcpyHostToDevice); printf("Starting kernel...\n"); //specify the number of threads per block in X and Y dimensions dim3 dimBlock(16, 16, 1); //specify the number of blocks: we need enough blocks in 
both the X and Y // dimensions to cover the entire matrix, assuming we have 16 threads/block dim3 dimGrid((M - 1)/16 + 1, (N - 1)/16 + 1, 1); //call the kernel hipLaunchKernelGGL(( mat_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_mat, d_mat_res, d_mult); printf("Copying back...\n"); hipMemcpy(mat_res, d_mat_res, N * M * sizeof(int), hipMemcpyDeviceToHost); printf("Final matrix...\n"); for(i = 0; i < M; i++) { for(j = 0; j < N; j++) printf("%d\t", mat_res[i * M + j]); printf("\n"); } return 0; }
0ff1dab54531f875d0febe57bdbeaf706d674788.cu
#include <stdio.h> #define N 5 #define M 10 //global means it is called by host, run by device //mat is the original matrix *already allocated on GPU* //mat_res is the matrix to store the result *already allocated on GPU* //s is the scalar, passed directly from host to function __global__ void mat_mult(int *mat, int *mat_res, int *mult) { //row int tidX = blockIdx.x * blockDim.x + threadIdx.x; //col int tidY = blockIdx.y * blockDim.y + threadIdx.y; //thread ID must be < # of matrix rows and columns if(tidX < M && tidY < N) mat_res[tidX * N + tidY] = mat[tidX * N + tidY] * mult[tidY]; } //__host__ is default (called and run on host), so this is optional __host__ int main() { //host stuff int *mat = (int *) malloc(N * M * sizeof(int)); int *mat_res = (int *) malloc(N * M * sizeof(int)); int *mult = (int *) malloc(N * sizeof(int)); int *mult_res = (int *) malloc(M * sizeof(int)); //device stuff int *d_mat, *d_mat_res, *d_mult, *d_mult_res; printf("Past Pointer Var Dec\n"); //fill host matrix int i, j; for(i = 0; i < M; i++) for(j = 0; j < N; j++) mat[i * M + j] = i * N + j; for(i = 0; i < N; i++) mult[i] = 20 + i; printf("Original matrix...\n"); for(i = 0; i < M; i++) { for(j = 0; j < N; j++) printf("%d\t", mat[i * M + j]); printf("\n"); } printf("Allocating CUDA memory\n"); //allocate device memory cudaMalloc((void **) &d_mat,N * M * sizeof(int)); cudaMalloc((void **) &d_mat_res, N * M * sizeof(int)); printf("1\n"); cudaMalloc((void **) &d_mult, N * sizeof(int)); cudaMalloc((void **) &d_mult_res, M * sizeof(int)); //copy host matrix to device printf("Copying to device...\n"); cudaMemcpy(d_mat, mat, N * M * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_mult, mult, N * sizeof(int), cudaMemcpyHostToDevice); printf("Starting kernel...\n"); //specify the number of threads per block in X and Y dimensions dim3 dimBlock(16, 16, 1); //specify the number of blocks: we need enough blocks in both the X and Y // dimensions to cover the entire matrix, assuming we have 16 
threads/block dim3 dimGrid((M - 1)/16 + 1, (N - 1)/16 + 1, 1); //call the kernel mat_mult<<<dimGrid, dimBlock>>>(d_mat, d_mat_res, d_mult); printf("Copying back...\n"); cudaMemcpy(mat_res, d_mat_res, N * M * sizeof(int), cudaMemcpyDeviceToHost); printf("Final matrix...\n"); for(i = 0; i < M; i++) { for(j = 0; j < N; j++) printf("%d\t", mat_res[i * M + j]); printf("\n"); } return 0; }
b4bb4e8b4096a2b48149c9d701481811f5e4a3a1.hip
// !!! This is a file automatically generated by hipify!!! #include <mat.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <math.h> #include <matrix.h> #include <iostream> #include "rocblas.h" #include "cokus.cpp" #include "cuda_util.h" #include <hip/hip_runtime.h> using namespace std; const int KER_NUM = 20;// const int P_NUM = 3;// const int LEAP = 2;// const int GP_NUM = 5;//maxpooling const int NEU_NUM1 = 100; const int NEU_NUM2 = 13;// const int NEIGHBOR = 8;// const double LEARN_RATE = 0.5; const double MIN_ERR = 0.0001; //const int DATA_BATCH = 512;//512 //CUDA bool InitCUDA(){ int count; hipGetDeviceCount(&count); if(count==0){ fprintf(stderr,"There is no device.\n"); return false; } int i; for (i =0; i<count;i++){ hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i)==hipSuccess){ if(prop.major>=1){ break; } } } if(i==count){ fprintf(stderr,"There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } //copyshared memory __device__ void copy_data_to_shared(double * data, double * data_tmp,int head, int length){ for(int i=0; i<length; i++){ data_tmp[i] = data[i+head]; } __syncthreads(); } //GPU __global__ static void convol(int iter,int i0,double * train,double * kernel,double * re,double * bias,int x,int y,int z,int re_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum;// //3*3*hight if (id < KER_NUM){ extern __shared__ double train_tmp[]; //__shared__ double train_tmp[9*200]; int st = i0 * x * y * z; copy_data_to_shared(train,train_tmp,st,x*y*z);//trainshared memory /*double * ker = new double [x*y*P_NUM];//kernel for(int i=0; i<x*y*P_NUM; i++){ ker[i] = kernel[id*x*y*P_NUM + i]; }*/ double mid; //int i_1=0; for(int i=0; i<re_size; i++){ mid = 0; int start = i*x*y*LEAP;// for(int j=0; j<x*y*P_NUM; j++){ mid = mid + train_tmp[start + j]*kernel[id*x*y*P_NUM+j]; } mid = mid + bias[id]; re[i + id*re_size] = 2/(1+(1/exp(2*mid))) 
- 1; } /*for }*/ } } //GPU __global__ static void maxpooling(int iter,double * re,double * mre,int * mre_index,int re_size,int mre_num){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; //int res = re_size, mres = mre_num; //extern __shared__ double re_tmp[]; //copy_data_to_shared(re, re_tmp, 0, re_size*KER_NUM); if(id < KER_NUM){ double mid; int mid_index; for(int i=0; i<mre_num; i++){ mid = re[i*GP_NUM + id*re_size];// mid_index = i*GP_NUM + id*re_size; for(int j=i*GP_NUM+1; j<(i+1)*GP_NUM && j<re_size; j++){ if(mid < re[j + id*re_size]){ mid = re[j + id*re_size]; mid_index = j+id*re_size; } } mre[i + id * mre_num] = mid; mre_index[i + id * mre_num] = mid_index; } } } //, __global__ static void fullconnect(int iter,double * mre,double * omega,double * bias,double * F1,int mre_size){ int tid = blockIdx.x * blockDim.x +threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM1){ //mre //__shared__ double mre_tmp[50 * KER_NUM]; extern __shared__ double mre_tmp[]; copy_data_to_shared(mre,mre_tmp,0,mre_size); // double mid=0; for(int i=0; i<mre_size; i++){ mid = mid + omega[id + i*NEU_NUM1] * mre_tmp[i]; } F1[id] = 1/(1 + 1/exp(mid + bias[id]));//sigmoid } } // __global__ static void output(int iter, double * F1, double * omega2, double * bias, double * O2){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM2){ //F1 __shared__ double F1_tmp[NEU_NUM1]; copy_data_to_shared(F1, F1_tmp, 0, NEU_NUM1); // double mid = 0; for(int i=0; i<NEU_NUM1; i++){ mid = mid + omega2[id + i*NEU_NUM2] * F1_tmp[i]; } O2[id] = 1/(1 + 1/exp(mid + bias[id]));//sigmoid } } /**/ // __global__ static void bp_output(int iter,int train_idx, double * labels, double * O2, double * bias2, double * delta_L_a, double * delta_L_z) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = 
blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM2){ delta_L_a[id] = -(labels[id + train_idx * NEU_NUM2] - O2[id]); delta_L_z[id] = delta_L_a[id] * O2[id] *(1 - O2[id]); bias2[id] = bias2[id] - delta_L_z[id]*LEARN_RATE; } } // __global__ static void bp_fullconnect(int iter, double * omega2,double * bias1, double * F1, double * delta_L_a, double * delta_L_z, double *delta_f_a, double * delta_f_z) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM1){ double mid = 0; double delta_f_w; for(int i=0; i<NEU_NUM2; i++){ mid = mid + omega2[i + id*NEU_NUM2] * delta_L_z[i]; //delta_f_b[i] = delta_L_z[i]; delta_f_w = F1[id] * delta_L_z[i]; omega2[i + id*NEU_NUM2] = omega2[i + id*NEU_NUM2] - LEARN_RATE * delta_f_w; //bias2[i] = bias2[i] - LEARN_RATE*delta_f_b[i]; } delta_f_a[id] = mid; delta_f_z[id] = delta_f_a[id] * F1[id] * (1 - F1[id]); bias1[id] = bias1[id] - LEARN_RATE * delta_f_z[id]; } } //maxpoolingdelta_adelta_z __global__ static void bp_maxpooling(int iter, int mre_size,int *mre_index, double * omega1,double *mre, double * delta_f_a, double * delta_f_z, double * delta_m_a, double * delta_22) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < mre_size){ double mid = 0; double delta_m_w; for(int i=0; i<NEU_NUM1; i++){ mid = mid + omega1[i + id*NEU_NUM1] * delta_f_z[i]; //delta_2[i + id*NEU_NUM1] = mid; delta_m_w = mre[id] * delta_f_z[i]; omega1[i + id*NEU_NUM1] = omega1[i + id*NEU_NUM1] - LEARN_RATE * delta_m_w; } delta_m_a[id] = mid; //delta_2[id] = delta_m_a[id]; //int idx = mre_index[id]; delta_22[mre_index[id]] = delta_m_a[id]; } } //kernel __global__ static void bp_update_kernel(int iter,int i0, int x, int y, int z, int mre_num,int re_size, int * mre_index, double * delta_22, double * data, double * kernel,double * bias0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int 
threadNum =blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < KER_NUM){ extern __shared__ double train_tmp[]; copy_data_to_shared(data, train_tmp, x*y*z*i0, x*y*z); double * delta_k_w = new double [x*y*P_NUM]; double mid = 0; for (int i=0; i<mre_num; i++){ int idx = mre_index[i + id*mre_num]; int n = idx % re_size;//n int head = x*y*LEAP*n; for(int j=0; j<x*y*P_NUM; j++){ delta_k_w[j] = delta_k_w[j] + delta_22[idx] * train_tmp[j+head]; } mid = mid + delta_22[idx]; } for(int i=0;i<x*y*P_NUM;i++){ delta_k_w[i] = delta_k_w[i]/mre_num; kernel[id*x*y*P_NUM+i] = kernel[id*x*y*P_NUM+i] - LEARN_RATE*delta_k_w[i]; } //double delta_k_b = delta_22[idx]; bias0[id] = bias0[id] - LEARN_RATE*(mid/mre_num); delete [] delta_k_w; } } // __global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; //int idx = id * (NEIGHBOR+1) * z;//processed_data if (id < train_size){ int idx = id * (NEIGHBOR+1) * z; for (int i=0; i<z; i++){ for (int j=0; j<(NEIGHBOR+1); j++){ processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y]; idx = idx + 1; } } } } double lossfunction(double * output, double * labels, int idx){ double l = 0; for(int i=0; i<NEU_NUM2; i++){ l = l + (output[i] - labels[i + idx*NEU_NUM2]) * (output[i] - labels[i + idx*NEU_NUM2]); } l = l/2; return l; } // double count_err(double * test_labels, double * output, int test_idx) { double right=0; double max =0; int idx = 0; for(int i=0; i<NEU_NUM2; i++){ if(output[i]>max){ max = output[i]; idx = i; } } if((idx+1) == int(test_labels[test_idx])) right = 1; return right; } //shuffle void shuffle(int * data, double * labels, int dim_row, int width){ int index, i; int temp; double tmp; srand(time(NULL)); for(i=0; i<width; i++){ index=rand()%(width-i) + i; if(index != i){ for(int j=0; j<dim_row; j++){ temp = 
data[j + i*dim_row]; data[j + i*dim_row] = data[j +index*dim_row]; data[j + index*dim_row] = temp; } for(int j=0; j<NEU_NUM2; j++){ tmp = labels[j + i*NEU_NUM2]; labels[j + i*NEU_NUM2] = labels[j + index*NEU_NUM2]; labels[j + index*NEU_NUM2] = tmp; } } } } // double training(double * data, double * labels, int x, int y, int z){ clock_t start, end; start = clock(); double * gpu_data;// double * gpu_processed_train;// double * gpu_processed_test; int * gpu_train_index;// int * gpu_test_index; double * gpu_processed_labels; //double * gpu_test_labels; // int data_size = 0; int * data_index = new int [x*y]; for(int i=0; i<x*y; i++){ if(labels[i] != 0){ data_index[data_size]=i; data_size ++; } } int test_size = (data_size-1)/5 + 1; int train_size = data_size - test_size; fprintf(stdout,"train_size:%d test_size:%d\n",train_size,test_size); int * train_index = new int [train_size * (NEIGHBOR + 1)];//9x*y int * test_index = new int [test_size * (NEIGHBOR+1)]; double * processed_labels = new double [train_size * NEU_NUM2]; double * test_labels = new double [test_size]; int tr=0, te=0; for (int i=0; i<data_size; i++){ if (i%5 != 0){ train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];// train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1; train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){// for (int j=0; j<3; j++) train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){// for(int j=0;j<3;j++) train_index[j*3+2 + tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){// for(int j=0;j<3;j++) train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){// for(int j=0;j<3;j++) train_index[j+6 + tr*(NEIGHBOR+1)] = 
train_index[j + tr*(NEIGHBOR+1)]; } int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2; processed_labels[mid] = 1; tr = tr + 1; } else{ test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];// test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1; test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ test_index[j0 + te * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; test_index[j0+6 + te * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){// for (int j=0; j<3; j++) test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){// for(int j=0;j<3;j++) test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){// for(int j=0;j<3;j++) test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){// for(int j=0;j<3;j++) test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)]; } //int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2; test_labels[te] = labels[data_index[i]]; te = te + 1; } } shuffle(train_index, processed_labels, (NEIGHBOR+1), train_size); //fprintf(stdout,"train_size:%d\n",train_size); fprintf(stdout,"train_index:%d %d %d %d\ntrain_index:%d %d %d %d\ntrain_index:%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],train_index[9],train_index[10],train_index[11],train_index[12],train_index[18],train_index[19],train_index[20],train_index[21]); fprintf(stdout,"train labels:\n"); for(int i=0; i<NEU_NUM2; i++){ fprintf(stdout,"%lf ",processed_labels[i]); } fprintf(stdout,"\n"); //int * train_index = new int [train_size * (NEIGHBOR + 1)];//train_size9 // SAFE_CALL(hipMalloc((void **) &gpu_data, sizeof(double) * x * y * z)); SAFE_CALL(hipMemcpy(gpu_data, data, sizeof(double)* x * y * z, hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1))); 
SAFE_CALL(hipMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1))); SAFE_CALL(hipMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), hipMemcpyHostToDevice)); SAFE_CALL(hipMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z)); SAFE_CALL(hipMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));// int gridsize = 64; int blocksize = 1024; //int threadNum = gridsize * blocksize; double * processed_train = new double [train_size * (NEIGHBOR+1) * z]; double * processed_test = new double [test_size * (NEIGHBOR+1) * z]; // int iter=0; hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size); hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size); hipDeviceSynchronize(); end = clock(); double tt = double(end - start); fprintf(stdout,"Using time of preprocessing:%lf\n",tt/CLOCKS_PER_SEC); //SAFE_CALL(hipMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost)); //SAFE_CALL(hipMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost)); SAFE_CALL(hipFree(gpu_data)); SAFE_CALL(hipFree(gpu_train_index)); SAFE_CALL(hipFree(gpu_test_index)); hipDeviceSynchronize(); //fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]); //fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]); start = clock(); // double * kernel = new double [(NEIGHBOR+1)*P_NUM*KER_NUM]; //kernekl for(int i=0; i<(NEIGHBOR+1)*P_NUM*KER_NUM; i++){ kernel[i] 
= 2 * (rand()/(double)(RAND_MAX)) - 1 ; if(kernel[i] == 0 || kernel[i] == -1 || kernel[i] == 1) kernel[i] = 0.5; } // int re_size = 0; for (int i=0; i+P_NUM-1<z; i+=LEAP){ re_size ++; } //double * re = new double [re_size * KER_NUM]; fprintf(stdout,"Size of re:%d\n",re_size); int mre_num = re_size/GP_NUM + 1; if(re_size/GP_NUM == 0){ mre_num = re_size / GP_NUM; } fprintf(stdout,"mre_num:%d\n",mre_num); int mre_size = mre_num * KER_NUM; int ome_num1 = mre_num * KER_NUM * NEU_NUM1;// int ome_num2 = NEU_NUM1 * NEU_NUM2;// //double * gpu_labels; double * gpu_kernel; double * gpu_bias0; double * gpu_re;// double * gpu_mre;//maxpooling int * gpu_mre_index;// double * gpu_omega1;// double * gpu_F1;// double * gpu_bias1; double * gpu_omega2; double * gpu_O2; double * gpu_bias2; double * gpu_delta_La; double * gpu_delta_Lz; double * gpu_delta_fa; double * gpu_delta_fz; double * gpu_delta_ma; //double * gpu_delta_mz; //double * gpu_delta_2; double * gpu_delta_22; //double * gpu_delta_kw; //double * gpu_delta_ia; //double * gpu_delta_iz; double * delta_22 = new double [re_size*KER_NUM]; // SAFE_CALL(hipMalloc((void**) &gpu_processed_labels, sizeof(double) * train_size * NEU_NUM2)); SAFE_CALL(hipMemcpy(gpu_processed_labels,processed_labels,sizeof(double) * train_size * NEU_NUM2,hipMemcpyHostToDevice)); //kernel SAFE_CALL(hipMalloc((void**) &gpu_kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM)); SAFE_CALL(hipMemcpy(gpu_kernel,kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM,hipMemcpyHostToDevice)); //gpu_re SAFE_CALL(hipMalloc((void **) &gpu_re,sizeof(double) * re_size * KER_NUM)); // SAFE_CALL(hipMalloc((void **) &gpu_delta_La, sizeof(double) * NEU_NUM2)); SAFE_CALL(hipMalloc((void **) &gpu_delta_Lz, sizeof(double) * NEU_NUM2)); // SAFE_CALL(hipMalloc((void **) &gpu_delta_fa, sizeof(double) * NEU_NUM1)); SAFE_CALL(hipMalloc((void **) &gpu_delta_fz, sizeof(double) * NEU_NUM1)); //maxpooling SAFE_CALL(hipMalloc((void **) &gpu_delta_ma, sizeof(double) * mre_size)); 
//SAFE_CALL(hipMalloc((void **) &gpu_delta_mz, sizeof(double) * mre_size)); // //SAFE_CALL(hipMalloc((void **) &gpu_delta_2, sizeof(double) * mre_size)); SAFE_CALL(hipMalloc((void **) &gpu_delta_22,sizeof(double) * re_size * KER_NUM)); SAFE_CALL(hipMemcpy(gpu_delta_22, delta_22, sizeof(double) * re_size * KER_NUM, hipMemcpyHostToDevice)); //SAFE_CALL(hipMalloc((void **) &gpu_delta_kw, sizeof(double) * (NEIGHBOR+1) *P_NUM)); double * omega1 = new double [ome_num1]; double * omega2 = new double [ome_num2]; double * bias0 = new double [KER_NUM]; double * bias1 = new double [NEU_NUM1]; double * bias2 = new double [NEU_NUM2]; //Omega1 for(int i=0; i<ome_num1; i++){ omega1[i] = 2 * (rand()/(double)(RAND_MAX)) - 1 ; if(omega1[i] == 0 || omega1[i] == -1 || omega1[i] == 1) omega1[i] = 0.001; } //bias0 for(int i=0; i<KER_NUM; i++){ bias0[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } //bias1 for(int i=0; i<NEU_NUM1; i++){ bias1[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } //Omega2 for(int i=0; i<ome_num2; i++){ omega2[i] = 2 * (rand()/(double)(RAND_MAX)) - 1; if(omega2[i] ==0 || omega2[i] == 1 || omega2[i] ==-1) omega2[i] = 0.001; } fprintf(stdout, "Bias1: %lf %lf %lf\n",bias1[0],bias1[1],bias1[2]); //bias2 for(int i=0; i<NEU_NUM2; i++){ bias2[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } fprintf(stdout, "Bias2: %lf %lf %lf\n",bias2[0],bias2[1],bias2[2]); SAFE_CALL(hipMalloc((void **) &gpu_mre, sizeof(double) * mre_num * KER_NUM));//maxpoolinggpu_mre SAFE_CALL(hipMalloc((void **) &gpu_mre_index, sizeof(int) * mre_num * KER_NUM));//maxpooling SAFE_CALL(hipMalloc((void **) &gpu_omega1, sizeof(double) * ome_num1));// SAFE_CALL(hipMalloc((void **) &gpu_omega2, sizeof(double) * ome_num2));// SAFE_CALL(hipMalloc((void **) &gpu_F1, sizeof(double) * NEU_NUM1));// SAFE_CALL(hipMalloc((void **) &gpu_O2, sizeof(double) * NEU_NUM2));// SAFE_CALL(hipMalloc((void **) &gpu_bias0, sizeof(double) * KER_NUM));// SAFE_CALL(hipMalloc((void **) &gpu_bias1, sizeof(double) * NEU_NUM1));// 
SAFE_CALL(hipMalloc((void **) &gpu_bias2, sizeof(double) * NEU_NUM2)); SAFE_CALL(hipMemcpy(gpu_omega1, omega1, sizeof(double) * ome_num1, hipMemcpyHostToDevice));//GPU SAFE_CALL(hipMemcpy(gpu_omega2, omega2, sizeof(double) * ome_num2, hipMemcpyHostToDevice)); SAFE_CALL(hipMemcpy(gpu_bias0, bias0, sizeof(double) * KER_NUM, hipMemcpyHostToDevice)); SAFE_CALL(hipMemcpy(gpu_bias1, bias1, sizeof(double) * NEU_NUM1, hipMemcpyHostToDevice));// SAFE_CALL(hipMemcpy(gpu_bias2, bias2, sizeof(double) * NEU_NUM2, hipMemcpyHostToDevice)); //double * mre = new double [mre_num * KER_NUM];//CPUmaxpooling //double * F1 = new double [NEU_NUM1];//CPU double * O2 = new double [NEU_NUM2];//CPU //double * lz = new double [NEU_NUM2]; double loss; for(int j=0; j<1001; j++){ if (j % 100 == 0) fprintf(stdout,"The %dth iteration.\n",j); for(int i0=0; i0<train_size; i0++){ int iter = 0; // hipLaunchKernelGGL(( convol), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i0,gpu_processed_train,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size); hipDeviceSynchronize(); //maxpoolingre hipLaunchKernelGGL(( maxpooling), dim3(1),dim3(KER_NUM), 0, 0, iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num); hipDeviceSynchronize(); // hipLaunchKernelGGL(( fullconnect), dim3(1),dim3(NEU_NUM1),mre_size * sizeof(double), 0, iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size); hipDeviceSynchronize(); // hipLaunchKernelGGL(( output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2); hipDeviceSynchronize(); //SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost)); //hipDeviceSynchronize(); //loss = lossfunction(O2, processed_labels, i0); //if(i0%100==0 && j%100==0) fprintf(stdout,"loss:%lf \n",loss); //if(loss < MIN_ERR) // break; // hipLaunchKernelGGL(( bp_output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,i0,gpu_processed_labels,gpu_O2,gpu_bias2,gpu_delta_La,gpu_delta_Lz); // hipLaunchKernelGGL(( bp_fullconnect), dim3(1),dim3(NEU_NUM1), 0, 0, 
iter,gpu_omega2,gpu_bias1,gpu_F1,gpu_delta_La,gpu_delta_Lz,gpu_delta_fa,gpu_delta_fz); //maxpooling hipLaunchKernelGGL(( bp_maxpooling), dim3(1),dim3(mre_size), 0, 0, iter,mre_size,gpu_mre_index,gpu_omega1,gpu_mre,gpu_delta_fa,gpu_delta_fz,gpu_delta_ma,gpu_delta_22); //map //bp_map_convol<<<1,mre_size>>>(iter,mre_num,gpu_mre_index,gpu_re,gpu_delta_2,gpu_delta_22); hipLaunchKernelGGL(( bp_update_kernel), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i0,3,3,z,mre_num,re_size,gpu_mre_index,gpu_delta_22,gpu_processed_train,gpu_kernel,gpu_bias0); hipDeviceSynchronize(); //SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost)); /*if(i0<10 && j == 0){ fprintf(stdout,"Output:\n "); for(int j=0; j<NEU_NUM2; j++) fprintf(stdout," %lf",O2[j]); fprintf(stdout,"\n"); }*/ } //double loss = lossfunction(O2, processed_labels, i0); //if(j%100 == 0) fprintf(stdout,"loss:%lf \n",loss); //if(loss < MIN_ERR) break; } fprintf(stdout,"Training completed!\n"); end = clock(); tt = double(end - start); fprintf(stdout,"Using time of training:%lfs\n",tt/CLOCKS_PER_SEC); start = clock(); //hipDeviceSynchronize(); SAFE_CALL(hipMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(bias0, gpu_bias0, sizeof(double) * KER_NUM, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(bias1, gpu_bias1, sizeof(double) * NEU_NUM1, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(omega1, gpu_omega1, sizeof(double) * ome_num1, hipMemcpyDeviceToHost)); SAFE_CALL(hipMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); //fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]); //mat /*MATFile * pmatFile; pmatFile = matOpen("model.mat","w"); mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*P_NUM,KER_NUM,mxREAL); memcpy((void *)mxGetPr(m1), (void 
*)kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM); matPutVariable(pmatFile, "kernel", m1); mxArray * m2 = mxCreateDoubleMatrix(KER_NUM,1,mxREAL); memcpy((void *)mxGetPr(m2), (void *)bias0, sizeof(double) * KER_NUM); matPutVariable(pmatFile, "bias0", m2); mxArray * m3 = mxCreateDoubleMatrix(NEU_NUM1,mre_size,mxREAL); memcpy((void *)mxGetPr(m3), (void *)omega1, sizeof(double) * ome_num1); matPutVariable(pmatFile, "omega1", m3); mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM1,1,mxREAL); memcpy((void *)mxGetPr(m4), (void *)bias1, sizeof(double) * NEU_NUM1); matPutVariable(pmatFile, "bias1", m4); mxArray * m5 = mxCreateDoubleMatrix(NEU_NUM2,NEU_NUM1,mxREAL); memcpy((void *)mxGetPr(m5), (void *)omega2, sizeof(double) * ome_num2); matPutVariable(pmatFile, "omega2", m5); mxArray * m6 = mxCreateDoubleMatrix(NEU_NUM2,1,mxREAL); memcpy((void *)mxGetPr(m6), (void *)bias2, sizeof(double) * NEU_NUM2); matPutVariable(pmatFile, "bias2", m6); matClose(pmatFile);*/ //fprintf(stdout,"mre:%lf %lf %lf\n",mre[0],mre[1],mre[2]); //fprintf(stdout,"mre_index:%d %d %d\n",mre_index[0],mre_index[1],mre_index[2]); //fprintf(stdout,"F1 Output:%lf %lf; %lf %lf\n",F1[0],F1[1],F1[98],F1[99]); //fprintf(stdout,"O2 Output:%lf %lf; %lf %lf\n",O2[0],O2[1],O2[18],O2[19]); //end = clock(); //tt = double(end - start); //fprintf(stdout, "Using time of writeback:%lfs\n",tt/CLOCKS_PER_SEC); //test double right = 0; double count = 0; for (int i1=0; i1<test_size; i1++){ int iter = 0; hipLaunchKernelGGL(( convol), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i1,gpu_processed_test,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( maxpooling), dim3(1),dim3(KER_NUM), 0, 0, iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num); hipDeviceSynchronize(); hipLaunchKernelGGL(( fullconnect), dim3(1),dim3(NEU_NUM1),mre_size * sizeof(double), 0, iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( output), 
dim3(1),dim3(NEU_NUM2), 0, 0, iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2); hipDeviceSynchronize(); SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); if(i1<10){ fprintf(stdout,"Output:\n"); for (int i=0;i<NEU_NUM2;i++) fprintf(stdout," %lf",O2[i]); fprintf(stdout,"\n"); } //fprintf(stdout,"\n"); right = count_err(test_labels, O2, i1); count = count + right; } end = clock(); tt = double(end - start); fprintf(stdout,"Using time of test:%lf\n",tt/CLOCKS_PER_SEC); return count/test_size; } // int main(int argc, char * argv[]) { if(!InitCUDA()){ return 0; } printf("CUDA initialized.\n"); clock_t start,end; double *trainset,*trainlabels; if(argc!=2){ fprintf(stderr, "4 input arguments required!"); } MATFile * datamat = matOpen(argv[1], "r"); mxArray * train = matGetVariable(datamat,"DataSet"); mxArray * labels = matGetVariable(datamat,"labels"); trainset = (double*)mxGetData(train); trainlabels = (double*)mxGetData(labels); const mwSize * dim; dim = mxGetDimensions(train);//trainset start = clock(); double correct = training(trainset, trainlabels, dim[0], dim[1], dim[2]); end = clock(); fprintf(stdout,"Correct Rate:%lf\n",correct); double usetime = double(end - start); fprintf(stdout, "Using time of the whole procedure:%lfs\n",usetime/CLOCKS_PER_SEC); return 0; }
b4bb4e8b4096a2b48149c9d701481811f5e4a3a1.cu
#include <mat.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <math.h> #include <matrix.h> #include <iostream> #include "cublas_v2.h" #include "cokus.cpp" #include "cuda_util.h" #include <cuda_runtime.h> using namespace std; const int KER_NUM = 20;//卷积核数量 const int P_NUM = 3;//每次卷积的层数 const int LEAP = 2;//跳数 const int GP_NUM = 5;//maxpooling每组的个数 const int NEU_NUM1 = 100; const int NEU_NUM2 = 13;//输出层神经元个数 const int NEIGHBOR = 8;//定义邻居个数 const double LEARN_RATE = 0.5; const double MIN_ERR = 0.0001; //const int DATA_BATCH = 512;//每次处理512个像素对应的数据 //CUDA初始化 bool InitCUDA(){ int count; cudaGetDeviceCount(&count); if(count==0){ fprintf(stderr,"There is no device.\n"); return false; } int i; for (i =0; i<count;i++){ cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){ if(prop.major>=1){ break; } } } if(i==count){ fprintf(stderr,"There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } //copy数据到shared memory __device__ void copy_data_to_shared(double * data, double * data_tmp,int head, int length){ for(int i=0; i<length; i++){ data_tmp[i] = data[i+head]; } __syncthreads(); } //GPU端负责卷积 __global__ static void convol(int iter,int i0,double * train,double * kernel,double * re,double * bias,int x,int y,int z,int re_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum;//保存当前线程编号 //每个线程负责一个卷积核与一个3*3*hight柱状图像的卷积 if (id < KER_NUM){ extern __shared__ double train_tmp[]; //__shared__ double train_tmp[9*200]; int st = i0 * x * y * z; copy_data_to_shared(train,train_tmp,st,x*y*z);//复制train到shared memory中 /*double * ker = new double [x*y*P_NUM];//载入对应的kernel到寄存器 for(int i=0; i<x*y*P_NUM; i++){ ker[i] = kernel[id*x*y*P_NUM + i]; }*/ double mid; //int i_1=0; for(int i=0; i<re_size; i++){ mid = 0; int start = i*x*y*LEAP;//训练数据每次卷积的起点 for(int j=0; j<x*y*P_NUM; j++){ mid = mid + train_tmp[start + j]*kernel[id*x*y*P_NUM+j]; } mid = mid + 
bias[id]; re[i + id*re_size] = 2/(1+(1/exp(2*mid))) - 1; } /*for }*/ } } //GPU端进行下采样 __global__ static void maxpooling(int iter,double * re,double * mre,int * mre_index,int re_size,int mre_num){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; //int res = re_size, mres = mre_num; //extern __shared__ double re_tmp[]; //copy_data_to_shared(re, re_tmp, 0, re_size*KER_NUM); if(id < KER_NUM){ double mid; int mid_index; for(int i=0; i<mre_num; i++){ mid = re[i*GP_NUM + id*re_size];//存放每组第一个值 mid_index = i*GP_NUM + id*re_size; for(int j=i*GP_NUM+1; j<(i+1)*GP_NUM && j<re_size; j++){ if(mid < re[j + id*re_size]){ mid = re[j + id*re_size]; mid_index = j+id*re_size; } } mre[i + id * mre_num] = mid; mre_index[i + id * mre_num] = mid_index; } } } //全连接层,每个线程负责一个神经元输出结果的计算 __global__ static void fullconnect(int iter,double * mre,double * omega,double * bias,double * F1,int mre_size){ int tid = blockIdx.x * blockDim.x +threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM1){ //复制mre数组到共享内存 //__shared__ double mre_tmp[50 * KER_NUM]; extern __shared__ double mre_tmp[]; copy_data_to_shared(mre,mre_tmp,0,mre_size); //计算神经元的输出 double mid=0; for(int i=0; i<mre_size; i++){ mid = mid + omega[id + i*NEU_NUM1] * mre_tmp[i]; } F1[id] = 1/(1 + 1/exp(mid + bias[id]));//激活函数sigmoid } } //输出层,每个线程负责一个神经元输出结果的计算 __global__ static void output(int iter, double * F1, double * omega2, double * bias, double * O2){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM2){ //复制F1到共享内存中 __shared__ double F1_tmp[NEU_NUM1]; copy_data_to_shared(F1, F1_tmp, 0, NEU_NUM1); //计算神经元的输出 double mid = 0; for(int i=0; i<NEU_NUM1; i++){ mid = mid + omega2[id + i*NEU_NUM2] * F1_tmp[i]; } O2[id] = 1/(1 + 1/exp(mid + bias[id]));//激活函数sigmoid } } /*反向传播*/ //输出层 __global__ static void bp_output(int iter,int train_idx, 
double * labels, double * O2, double * bias2, double * delta_L_a, double * delta_L_z) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM2){ delta_L_a[id] = -(labels[id + train_idx * NEU_NUM2] - O2[id]); delta_L_z[id] = delta_L_a[id] * O2[id] *(1 - O2[id]); bias2[id] = bias2[id] - delta_L_z[id]*LEARN_RATE; } } //全连接层 __global__ static void bp_fullconnect(int iter, double * omega2,double * bias1, double * F1, double * delta_L_a, double * delta_L_z, double *delta_f_a, double * delta_f_z) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < NEU_NUM1){ double mid = 0; double delta_f_w; for(int i=0; i<NEU_NUM2; i++){ mid = mid + omega2[i + id*NEU_NUM2] * delta_L_z[i]; //delta_f_b[i] = delta_L_z[i]; delta_f_w = F1[id] * delta_L_z[i]; omega2[i + id*NEU_NUM2] = omega2[i + id*NEU_NUM2] - LEARN_RATE * delta_f_w; //bias2[i] = bias2[i] - LEARN_RATE*delta_f_b[i]; } delta_f_a[id] = mid; delta_f_z[id] = delta_f_a[id] * F1[id] * (1 - F1[id]); bias1[id] = bias1[id] - LEARN_RATE * delta_f_z[id]; } } //maxpooling层(并将delta_a映射到卷积层的delta_z) __global__ static void bp_maxpooling(int iter, int mre_size,int *mre_index, double * omega1,double *mre, double * delta_f_a, double * delta_f_z, double * delta_m_a, double * delta_22) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < mre_size){ double mid = 0; double delta_m_w; for(int i=0; i<NEU_NUM1; i++){ mid = mid + omega1[i + id*NEU_NUM1] * delta_f_z[i]; //delta_2[i + id*NEU_NUM1] = mid; delta_m_w = mre[id] * delta_f_z[i]; omega1[i + id*NEU_NUM1] = omega1[i + id*NEU_NUM1] - LEARN_RATE * delta_m_w; } delta_m_a[id] = mid; //delta_2[id] = delta_m_a[id]; //int idx = mre_index[id]; delta_22[mre_index[id]] = delta_m_a[id]; } } //计算并更新kernel __global__ static void bp_update_kernel(int iter,int i0, int x, 
int y, int z, int mre_num,int re_size, int * mre_index, double * delta_22, double * data, double * kernel,double * bias0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum =blockDim.x * gridDim.x; int id = tid + iter * threadNum; if(id < KER_NUM){ extern __shared__ double train_tmp[]; copy_data_to_shared(data, train_tmp, x*y*z*i0, x*y*z); double * delta_k_w = new double [x*y*P_NUM]; double mid = 0; for (int i=0; i<mre_num; i++){ int idx = mre_index[i + id*mre_num]; int n = idx % re_size;//对应卷积的第n块数据 int head = x*y*LEAP*n; for(int j=0; j<x*y*P_NUM; j++){ delta_k_w[j] = delta_k_w[j] + delta_22[idx] * train_tmp[j+head]; } mid = mid + delta_22[idx]; } for(int i=0;i<x*y*P_NUM;i++){ delta_k_w[i] = delta_k_w[i]/mre_num; kernel[id*x*y*P_NUM+i] = kernel[id*x*y*P_NUM+i] - LEARN_RATE*delta_k_w[i]; } //double delta_k_b = delta_22[idx]; bias0[id] = bias0[id] - LEARN_RATE*(mid/mre_num); delete [] delta_k_w; } } //数据预处理 __global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = blockDim.x * gridDim.x; int id = tid + iter * threadNum; //int idx = id * (NEIGHBOR+1) * z;//记录processed_data的开始位置 if (id < train_size){ int idx = id * (NEIGHBOR+1) * z; for (int i=0; i<z; i++){ for (int j=0; j<(NEIGHBOR+1); j++){ processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y]; idx = idx + 1; } } } } double lossfunction(double * output, double * labels, int idx){ double l = 0; for(int i=0; i<NEU_NUM2; i++){ l = l + (output[i] - labels[i + idx*NEU_NUM2]) * (output[i] - labels[i + idx*NEU_NUM2]); } l = l/2; return l; } //计算正确率 double count_err(double * test_labels, double * output, int test_idx) { double right=0; double max =0; int idx = 0; for(int i=0; i<NEU_NUM2; i++){ if(output[i]>max){ max = output[i]; idx = i; } } if((idx+1) == int(test_labels[test_idx])) right = 1; return right; } //shuffle void shuffle(int * data, 
double * labels, int dim_row, int width){ int index, i; int temp; double tmp; srand(time(NULL)); for(i=0; i<width; i++){ index=rand()%(width-i) + i; if(index != i){ for(int j=0; j<dim_row; j++){ temp = data[j + i*dim_row]; data[j + i*dim_row] = data[j +index*dim_row]; data[j + index*dim_row] = temp; } for(int j=0; j<NEU_NUM2; j++){ tmp = labels[j + i*NEU_NUM2]; labels[j + i*NEU_NUM2] = labels[j + index*NEU_NUM2]; labels[j + index*NEU_NUM2] = tmp; } } } } //训练 double training(double * data, double * labels, int x, int y, int z){ clock_t start, end; start = clock(); double * gpu_data;//显存上存储原始数据 double * gpu_processed_train;//显存上存储处理之后的数据 double * gpu_processed_test; int * gpu_train_index;//训练数据的索引 int * gpu_test_index; double * gpu_processed_labels; //double * gpu_test_labels; //计算有标签像素的个数 int data_size = 0; int * data_index = new int [x*y]; for(int i=0; i<x*y; i++){ if(labels[i] != 0){ data_index[data_size]=i; data_size ++; } } int test_size = (data_size-1)/5 + 1; int train_size = data_size - test_size; fprintf(stdout,"train_size:%d test_size:%d\n",train_size,test_size); int * train_index = new int [train_size * (NEIGHBOR + 1)];//9行,x*y列。每列保存一个像素及其邻居的索引位置 int * test_index = new int [test_size * (NEIGHBOR+1)]; double * processed_labels = new double [train_size * NEU_NUM2]; double * test_labels = new double [test_size]; int tr=0, te=0; for (int i=0; i<data_size; i++){ if (i%5 != 0){ train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];//当前像素索引 train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1; train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){//第一行 for (int j=0; j<3; j++) train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){//最后一行 for(int j=0;j<3;j++) train_index[j*3+2 + 
tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){//第一列 for(int j=0;j<3;j++) train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){//最后一列 for(int j=0;j<3;j++) train_index[j+6 + tr*(NEIGHBOR+1)] = train_index[j + tr*(NEIGHBOR+1)]; } int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2; processed_labels[mid] = 1; tr = tr + 1; } else{ test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];//当前像素索引 test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1; test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1; for(int j0=0;j0<3;j0++){ test_index[j0 + te * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0; test_index[j0+6 + te * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0; } if((data_index[i] % x) == 0){//第一行 for (int j=0; j<3; j++) test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)]; } if((data_index[i] % x) == (x-1)){//最后一行 for(int j=0;j<3;j++) test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == 0){//第一列 for(int j=0;j<3;j++) test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)]; } if((data_index[i]/x) == (y-1)){//最后一列 for(int j=0;j<3;j++) test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)]; } //int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2; test_labels[te] = labels[data_index[i]]; te = te + 1; } } shuffle(train_index, processed_labels, (NEIGHBOR+1), train_size); //fprintf(stdout,"train_size:%d\n",train_size); fprintf(stdout,"train_index:%d %d %d %d\ntrain_index:%d %d %d %d\ntrain_index:%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],train_index[9],train_index[10],train_index[11],train_index[12],train_index[18],train_index[19],train_index[20],train_index[21]); fprintf(stdout,"train labels:\n"); for(int i=0; i<NEU_NUM2; i++){ fprintf(stdout,"%lf ",processed_labels[i]); } fprintf(stdout,"\n"); //int * train_index = new int [train_size * 
(NEIGHBOR + 1)];//train_size列,9行。每行保存一个像素及其邻居的索引位置 //分配显存,拷贝数据到显存上 SAFE_CALL(cudaMalloc((void **) &gpu_data, sizeof(double) * x * y * z)); SAFE_CALL(cudaMemcpy(gpu_data, data, sizeof(double)* x * y * z, cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1))); SAFE_CALL(cudaMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1))); SAFE_CALL(cudaMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z)); SAFE_CALL(cudaMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));//每一批数据的大小 int gridsize = 64; int blocksize = 1024; //int threadNum = gridsize * blocksize; double * processed_train = new double [train_size * (NEIGHBOR+1) * z]; double * processed_test = new double [test_size * (NEIGHBOR+1) * z]; //预处理 int iter=0; processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size); processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size); cudaDeviceSynchronize(); end = clock(); double tt = double(end - start); fprintf(stdout,"Using time of preprocessing:%lf\n",tt/CLOCKS_PER_SEC); //SAFE_CALL(cudaMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost)); //SAFE_CALL(cudaMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaFree(gpu_data)); SAFE_CALL(cudaFree(gpu_train_index)); SAFE_CALL(cudaFree(gpu_test_index)); cudaDeviceSynchronize(); //fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]); 
//fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]); start = clock(); //前向传播 double * kernel = new double [(NEIGHBOR+1)*P_NUM*KER_NUM]; //随机生成kernekl数组 for(int i=0; i<(NEIGHBOR+1)*P_NUM*KER_NUM; i++){ kernel[i] = 2 * (rand()/(double)(RAND_MAX)) - 1 ; if(kernel[i] == 0 || kernel[i] == -1 || kernel[i] == 1) kernel[i] = 0.5; } //计算每次卷积的结果个数 int re_size = 0; for (int i=0; i+P_NUM-1<z; i+=LEAP){ re_size ++; } //double * re = new double [re_size * KER_NUM]; fprintf(stdout,"Size of re:%d\n",re_size); int mre_num = re_size/GP_NUM + 1; if(re_size/GP_NUM == 0){ mre_num = re_size / GP_NUM; } fprintf(stdout,"mre_num:%d\n",mre_num); int mre_size = mre_num * KER_NUM; int ome_num1 = mre_num * KER_NUM * NEU_NUM1;//第一层网络的输入权重个数 int ome_num2 = NEU_NUM1 * NEU_NUM2;//输出层的权重个数 //double * gpu_labels; double * gpu_kernel; double * gpu_bias0; double * gpu_re;//存放卷积结果 double * gpu_mre;//存放maxpooling结果 int * gpu_mre_index;//存放每组最大值的索引 double * gpu_omega1;//第一层网络的输入权重 double * gpu_F1;//第一层神经元的输出 double * gpu_bias1; double * gpu_omega2; double * gpu_O2; double * gpu_bias2; double * gpu_delta_La; double * gpu_delta_Lz; double * gpu_delta_fa; double * gpu_delta_fz; double * gpu_delta_ma; //double * gpu_delta_mz; //double * gpu_delta_2; double * gpu_delta_22; //double * gpu_delta_kw; //double * gpu_delta_ia; //double * gpu_delta_iz; double * delta_22 = new double [re_size*KER_NUM]; //复制标签 SAFE_CALL(cudaMalloc((void**) &gpu_processed_labels, sizeof(double) * train_size * NEU_NUM2)); SAFE_CALL(cudaMemcpy(gpu_processed_labels,processed_labels,sizeof(double) * train_size * NEU_NUM2,cudaMemcpyHostToDevice)); //复制随机初始化的kernel数组 SAFE_CALL(cudaMalloc((void**) &gpu_kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM)); SAFE_CALL(cudaMemcpy(gpu_kernel,kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM,cudaMemcpyHostToDevice)); //卷积结果存入gpu_re,分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_re,sizeof(double) * re_size * KER_NUM)); 
//输出层偏导数 SAFE_CALL(cudaMalloc((void **) &gpu_delta_La, sizeof(double) * NEU_NUM2)); SAFE_CALL(cudaMalloc((void **) &gpu_delta_Lz, sizeof(double) * NEU_NUM2)); //全连接层偏导数 SAFE_CALL(cudaMalloc((void **) &gpu_delta_fa, sizeof(double) * NEU_NUM1)); SAFE_CALL(cudaMalloc((void **) &gpu_delta_fz, sizeof(double) * NEU_NUM1)); //maxpooling SAFE_CALL(cudaMalloc((void **) &gpu_delta_ma, sizeof(double) * mre_size)); //SAFE_CALL(cudaMalloc((void **) &gpu_delta_mz, sizeof(double) * mre_size)); //输入层 //SAFE_CALL(cudaMalloc((void **) &gpu_delta_2, sizeof(double) * mre_size)); SAFE_CALL(cudaMalloc((void **) &gpu_delta_22,sizeof(double) * re_size * KER_NUM)); SAFE_CALL(cudaMemcpy(gpu_delta_22, delta_22, sizeof(double) * re_size * KER_NUM, cudaMemcpyHostToDevice)); //SAFE_CALL(cudaMalloc((void **) &gpu_delta_kw, sizeof(double) * (NEIGHBOR+1) *P_NUM)); double * omega1 = new double [ome_num1]; double * omega2 = new double [ome_num2]; double * bias0 = new double [KER_NUM]; double * bias1 = new double [NEU_NUM1]; double * bias2 = new double [NEU_NUM2]; //随机生成Omega1 for(int i=0; i<ome_num1; i++){ omega1[i] = 2 * (rand()/(double)(RAND_MAX)) - 1 ; if(omega1[i] == 0 || omega1[i] == -1 || omega1[i] == 1) omega1[i] = 0.001; } //随机生成bias0 for(int i=0; i<KER_NUM; i++){ bias0[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } //随机生成bias1 for(int i=0; i<NEU_NUM1; i++){ bias1[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } //随机生成Omega2 for(int i=0; i<ome_num2; i++){ omega2[i] = 2 * (rand()/(double)(RAND_MAX)) - 1; if(omega2[i] ==0 || omega2[i] == 1 || omega2[i] ==-1) omega2[i] = 0.001; } fprintf(stdout, "Bias1: %lf %lf %lf\n",bias1[0],bias1[1],bias1[2]); //随机生成bias2 for(int i=0; i<NEU_NUM2; i++){ bias2[i] = 2*(rand()/(double)(RAND_MAX)) - 1; } fprintf(stdout, "Bias2: %lf %lf %lf\n",bias2[0],bias2[1],bias2[2]); SAFE_CALL(cudaMalloc((void **) &gpu_mre, sizeof(double) * mre_num * KER_NUM));//maxpooling结果存入gpu_mre,分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_mre_index, sizeof(int) * mre_num * 
KER_NUM));//为maxpooling的最大值索引分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_omega1, sizeof(double) * ome_num1));//第一层网络的输入权重,分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_omega2, sizeof(double) * ome_num2));//输出层的权重,分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_F1, sizeof(double) * NEU_NUM1));//第一层网络的输出,分配显存 SAFE_CALL(cudaMalloc((void **) &gpu_O2, sizeof(double) * NEU_NUM2));//输出层的结果 SAFE_CALL(cudaMalloc((void **) &gpu_bias0, sizeof(double) * KER_NUM));//卷积层偏置值 SAFE_CALL(cudaMalloc((void **) &gpu_bias1, sizeof(double) * NEU_NUM1));//偏置值 SAFE_CALL(cudaMalloc((void **) &gpu_bias2, sizeof(double) * NEU_NUM2)); SAFE_CALL(cudaMemcpy(gpu_omega1, omega1, sizeof(double) * ome_num1, cudaMemcpyHostToDevice));//复制初始权重到GPU端 SAFE_CALL(cudaMemcpy(gpu_omega2, omega2, sizeof(double) * ome_num2, cudaMemcpyHostToDevice)); SAFE_CALL(cudaMemcpy(gpu_bias0, bias0, sizeof(double) * KER_NUM, cudaMemcpyHostToDevice)); SAFE_CALL(cudaMemcpy(gpu_bias1, bias1, sizeof(double) * NEU_NUM1, cudaMemcpyHostToDevice));//复制偏置值到显存 SAFE_CALL(cudaMemcpy(gpu_bias2, bias2, sizeof(double) * NEU_NUM2, cudaMemcpyHostToDevice)); //double * mre = new double [mre_num * KER_NUM];//CPU端存放maxpooling结果 //double * F1 = new double [NEU_NUM1];//CPU端存放第一层网络输出结果 double * O2 = new double [NEU_NUM2];//CPU端存放输出层的结果 //double * lz = new double [NEU_NUM2]; double loss; for(int j=0; j<1001; j++){ if (j % 100 == 0) fprintf(stdout,"The %dth iteration.\n",j); for(int i0=0; i0<train_size; i0++){ int iter = 0; //卷积,每个线程负责一个卷积核和训练数据的卷积 convol<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i0,gpu_processed_train,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size); cudaDeviceSynchronize(); //下采样,maxpooling方法,每个线程负责re的一列 maxpooling<<<1,KER_NUM>>>(iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num); cudaDeviceSynchronize(); //全连接层 fullconnect<<<1,NEU_NUM1,mre_size * sizeof(double)>>>(iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size); cudaDeviceSynchronize(); //输出层 output<<<1,NEU_NUM2>>>(iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2); cudaDeviceSynchronize(); 
//SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost)); //cudaDeviceSynchronize(); //loss = lossfunction(O2, processed_labels, i0); //if(i0%100==0 && j%100==0) fprintf(stdout,"loss:%lf \n",loss); //if(loss < MIN_ERR) // break; //反向传播,输出层 bp_output<<<1,NEU_NUM2>>>(iter,i0,gpu_processed_labels,gpu_O2,gpu_bias2,gpu_delta_La,gpu_delta_Lz); //反向传播,全连接层 bp_fullconnect<<<1,NEU_NUM1>>>(iter,gpu_omega2,gpu_bias1,gpu_F1,gpu_delta_La,gpu_delta_Lz,gpu_delta_fa,gpu_delta_fz); //反向传播,maxpooling层 bp_maxpooling<<<1,mre_size>>>(iter,mre_size,gpu_mre_index,gpu_omega1,gpu_mre,gpu_delta_fa,gpu_delta_fz,gpu_delta_ma,gpu_delta_22); //反向传播,map到卷积层 //bp_map_convol<<<1,mre_size>>>(iter,mre_num,gpu_mre_index,gpu_re,gpu_delta_2,gpu_delta_22); bp_update_kernel<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i0,3,3,z,mre_num,re_size,gpu_mre_index,gpu_delta_22,gpu_processed_train,gpu_kernel,gpu_bias0); cudaDeviceSynchronize(); //SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost)); /*if(i0<10 && j == 0){ fprintf(stdout,"Output:\n "); for(int j=0; j<NEU_NUM2; j++) fprintf(stdout," %lf",O2[j]); fprintf(stdout,"\n"); }*/ } //double loss = lossfunction(O2, processed_labels, i0); //if(j%100 == 0) fprintf(stdout,"loss:%lf \n",loss); //if(loss < MIN_ERR) break; } fprintf(stdout,"Training completed!\n"); end = clock(); tt = double(end - start); fprintf(stdout,"Using time of training:%lfs\n",tt/CLOCKS_PER_SEC); start = clock(); //cudaDeviceSynchronize(); SAFE_CALL(cudaMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaMemcpy(bias0, gpu_bias0, sizeof(double) * KER_NUM, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaMemcpy(bias1, gpu_bias1, sizeof(double) * NEU_NUM1, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost)); SAFE_CALL(cudaMemcpy(omega1, gpu_omega1, sizeof(double) * ome_num1, cudaMemcpyDeviceToHost)); 
SAFE_CALL(cudaMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); //fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]); //将训练完的参数写入mat文件 /*MATFile * pmatFile; pmatFile = matOpen("model.mat","w"); mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*P_NUM,KER_NUM,mxREAL); memcpy((void *)mxGetPr(m1), (void *)kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM); matPutVariable(pmatFile, "kernel", m1); mxArray * m2 = mxCreateDoubleMatrix(KER_NUM,1,mxREAL); memcpy((void *)mxGetPr(m2), (void *)bias0, sizeof(double) * KER_NUM); matPutVariable(pmatFile, "bias0", m2); mxArray * m3 = mxCreateDoubleMatrix(NEU_NUM1,mre_size,mxREAL); memcpy((void *)mxGetPr(m3), (void *)omega1, sizeof(double) * ome_num1); matPutVariable(pmatFile, "omega1", m3); mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM1,1,mxREAL); memcpy((void *)mxGetPr(m4), (void *)bias1, sizeof(double) * NEU_NUM1); matPutVariable(pmatFile, "bias1", m4); mxArray * m5 = mxCreateDoubleMatrix(NEU_NUM2,NEU_NUM1,mxREAL); memcpy((void *)mxGetPr(m5), (void *)omega2, sizeof(double) * ome_num2); matPutVariable(pmatFile, "omega2", m5); mxArray * m6 = mxCreateDoubleMatrix(NEU_NUM2,1,mxREAL); memcpy((void *)mxGetPr(m6), (void *)bias2, sizeof(double) * NEU_NUM2); matPutVariable(pmatFile, "bias2", m6); matClose(pmatFile);*/ //fprintf(stdout,"mre:%lf %lf %lf\n",mre[0],mre[1],mre[2]); //fprintf(stdout,"mre_index:%d %d %d\n",mre_index[0],mre_index[1],mre_index[2]); //fprintf(stdout,"F1 Output:%lf %lf; %lf %lf\n",F1[0],F1[1],F1[98],F1[99]); //fprintf(stdout,"O2 Output:%lf %lf; %lf %lf\n",O2[0],O2[1],O2[18],O2[19]); //end = clock(); //tt = double(end - start); //fprintf(stdout, "Using time of writeback:%lfs\n",tt/CLOCKS_PER_SEC); //test double right = 0; double count = 0; for (int i1=0; i1<test_size; i1++){ int iter = 0; convol<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i1,gpu_processed_test,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size); 
cudaDeviceSynchronize(); maxpooling<<<1,KER_NUM>>>(iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num); cudaDeviceSynchronize(); fullconnect<<<1,NEU_NUM1,mre_size * sizeof(double)>>>(iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size); cudaDeviceSynchronize(); output<<<1,NEU_NUM2>>>(iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2); cudaDeviceSynchronize(); SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); if(i1<10){ fprintf(stdout,"Output:\n"); for (int i=0;i<NEU_NUM2;i++) fprintf(stdout," %lf",O2[i]); fprintf(stdout,"\n"); } //fprintf(stdout,"\n"); right = count_err(test_labels, O2, i1); count = count + right; } end = clock(); tt = double(end - start); fprintf(stdout,"Using time of test:%lf\n",tt/CLOCKS_PER_SEC); return count/test_size; } //主函数 int main(int argc, char * argv[]) { if(!InitCUDA()){ return 0; } printf("CUDA initialized.\n"); clock_t start,end; double *trainset,*trainlabels; if(argc!=2){ fprintf(stderr, "4 input arguments required!"); } MATFile * datamat = matOpen(argv[1], "r"); mxArray * train = matGetVariable(datamat,"DataSet"); mxArray * labels = matGetVariable(datamat,"labels"); trainset = (double*)mxGetData(train); trainlabels = (double*)mxGetData(labels); const mwSize * dim; dim = mxGetDimensions(train);//获取trainset每维的元素个数 start = clock(); double correct = training(trainset, trainlabels, dim[0], dim[1], dim[2]); end = clock(); fprintf(stdout,"Correct Rate:%lf\n",correct); double usetime = double(end - start); fprintf(stdout, "Using time of the whole procedure:%lfs\n",usetime/CLOCKS_PER_SEC); return 0; }
73c3f3059701de7265a6160a52e5d8785aff8770.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void uniformAdd1(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) { __shared__ int uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, blockDim.x) + baseIndex + threadIdx.x; __syncthreads(); // note one add per thread g_data[address] += uni; }
73c3f3059701de7265a6160a52e5d8785aff8770.cu
#include "includes.h" __global__ void uniformAdd1(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) { __shared__ int uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, blockDim.x) + baseIndex + threadIdx.x; __syncthreads(); // note one add per thread g_data[address] += uni; }
f119903b1640be64ca5ac8d2bd6a3ce452250d33.hip
// !!! This is a file automatically generated by hipify!!! /************************************************ * Simple CUDA example to transfer data CPU-GPU * ************************************************/ #include <stdio.h> #include <stdlib.h> #define CUDA_SAFE_CALL( call ) { \ hipError_t err = call; \ if( hipSuccess != err ) { \ fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \ exit(err); \ } } #define A(i,j) A[ (j) + ((i)*(n)) ] #define B(i,j) B[ (j) + ((i)*(n)) ] int main( int argc, char *argv[] ) { unsigned int m, n; unsigned int i, j; /* Generating input data */ if( argc<3 ) { printf("Usage: %s rows cols \n",argv[0]); exit(-1); } sscanf(argv[1],"%d",&m); sscanf(argv[2],"%d",&n); /* STEP 1: Allocate memory for three m-by-n matrices called A and B in the host */ float *A, *B, *C; A = (float*) malloc( m*n*sizeof(float) ); C = (float*) malloc( m*n*sizeof(float) ); B = (float*) malloc( m*n*sizeof(float) ); for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ A(i,j) = ( 2.0f * (float) rand() / 1.0f ) - 1.0f; } } /* STEP 2: Fill matrices A and B with real values between -1.0 and 1.0 */ float *d_A, *d_B, *d_C; /* STEP 3: Allocate memory for three m-by-n matrices into the device memory */ CUDA_SAFE_CALL( hipMalloc( (void**) &d_A, m*n*sizeof(float) ) ); CUDA_SAFE_CALL( hipMalloc( (void**) &d_B, m*n*sizeof(float) ) ); CUDA_SAFE_CALL( hipMalloc( (void**) &d_C, m*n*sizeof(float) ) ); /* STEP 4: Copy host memory (only matrices A and B) to the device memory (matrices d_A and d_B) */ CUDA_SAFE_CALL( hipMemcpy(d_A, A, m*n*sizeof(float), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(d_B, B, m*n*sizeof(float), hipMemcpyHostToDevice) ); /* STEP 5: Copy back from device memory into the host memory only data cohrresponding to matrix C (d_C) */ CUDA_SAFE_CALL( hipMemcpy(C, d_C, m*n*sizeof(float), hipMemcpyDeviceToHost) ); /* STEP 6: Deallocate device memory */ CUDA_SAFE_CALL( hipFree(d_A) ); CUDA_SAFE_CALL( hipFree(d_B) ); CUDA_SAFE_CALL( hipFree(d_C) ); /* 
STEP 7: Deallocate host memory */ free(A); free(B); free(C); }
f119903b1640be64ca5ac8d2bd6a3ce452250d33.cu
/************************************************ * Simple CUDA example to transfer data CPU-GPU * ************************************************/ #include <stdio.h> #include <stdlib.h> #define CUDA_SAFE_CALL( call ) { \ cudaError_t err = call; \ if( cudaSuccess != err ) { \ fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \ exit(err); \ } } #define A(i,j) A[ (j) + ((i)*(n)) ] #define B(i,j) B[ (j) + ((i)*(n)) ] int main( int argc, char *argv[] ) { unsigned int m, n; unsigned int i, j; /* Generating input data */ if( argc<3 ) { printf("Usage: %s rows cols \n",argv[0]); exit(-1); } sscanf(argv[1],"%d",&m); sscanf(argv[2],"%d",&n); /* STEP 1: Allocate memory for three m-by-n matrices called A and B in the host */ float *A, *B, *C; A = (float*) malloc( m*n*sizeof(float) ); C = (float*) malloc( m*n*sizeof(float) ); B = (float*) malloc( m*n*sizeof(float) ); for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ A(i,j) = ( 2.0f * (float) rand() / 1.0f ) - 1.0f; } } /* STEP 2: Fill matrices A and B with real values between -1.0 and 1.0 */ float *d_A, *d_B, *d_C; /* STEP 3: Allocate memory for three m-by-n matrices into the device memory */ CUDA_SAFE_CALL( cudaMalloc( (void**) &d_A, m*n*sizeof(float) ) ); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_B, m*n*sizeof(float) ) ); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_C, m*n*sizeof(float) ) ); /* STEP 4: Copy host memory (only matrices A and B) to the device memory (matrices d_A and d_B) */ CUDA_SAFE_CALL( cudaMemcpy(d_A, A, m*n*sizeof(float), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(d_B, B, m*n*sizeof(float), cudaMemcpyHostToDevice) ); /* STEP 5: Copy back from device memory into the host memory only data cohrresponding to matrix C (d_C) */ CUDA_SAFE_CALL( cudaMemcpy(C, d_C, m*n*sizeof(float), cudaMemcpyDeviceToHost) ); /* STEP 6: Deallocate device memory */ CUDA_SAFE_CALL( cudaFree(d_A) ); CUDA_SAFE_CALL( cudaFree(d_B) ); CUDA_SAFE_CALL( cudaFree(d_C) ); /* STEP 7: Deallocate host memory */ free(A); 
free(B); free(C); }
a8c26cc6ea86f06affd0fb694468a75f87136d8c.hip
// !!! This is a file automatically generated by hipify!!! // // mcar.cu // mcar // // Created by Dwight Bell on 11/20/10. // Copyright dbelll 2010. All rights reserved. // #include <hip/hip_runtime.h> #include "cutil.h" #include <math.h> #include <assert.h> #include "main.h" #include "mcar.h" #include "cuda_utils.h" #include "cuda_rand.cu" #include "misc_utils.h" #include "reduction.h" #include "gpu_results.h" // parameters stored in global structure for CPU static PARAMS _p; // Initial global seeds used to ensure identical random variables each run on all machines static unsigned g_multiseeds[16*4] = { 2784565659u, 1491908209u, 3415062841u, 3293636241u, \ 1714636915u, 1681692777u, 846930886u, 1804289383u, \ 1649760492u, 719885386u, 424238335u, 1957747793u, \ 1350490027u, 1025202362u, 1189641421u, 596516649u, \ 1967513926u, 2044897763u, 1102520059u, 783368690u, \ 1303455736u, 304089172u, 1540383426u, 1365180540u, \ 1726956429u, 294702567u, 521595368u, 35005211u, \ 233665123u, 278722862u, 861021530u, 336465782u, \ 1801979802u, 1101513929u, 468703135u, 2145174067u, \ 1125898167u, 1369133069u, 635723058u, 1315634022u, \ 1656478042u, 628175011u, 2089018456u, 1059961393u, \ 1914544919u, 859484421u, 1653377373u, 1131176229u, \ 1973594324u, 1734575198u, 756898537u, 608413784u, \ 184803526u, 1129566413u, 2038664370u, 149798315u, \ 749241873u, 1911759956u, 1424268980u, 412776091u, \ 1827336327u, 1937477084u, 2084420925u, 511702305u } ; static unsigned *g_seeds = g_multiseeds; static float accel[NUM_ACTIONS] = {-ACCEL_FACTOR, 0.0f, ACCEL_FACTOR}; void set_seed(unsigned seed){ g_seeds = g_multiseeds + seed*4; printf("seeds are %u %u %u %u\n", g_seeds[0], g_seeds[1], g_seeds[2], g_seeds[3]); } #pragma mark GPU constant memory __constant__ float dc_accel[NUM_ACTIONS]; //__constant__ PARAMS dc_p; __constant__ AGENT_DATA dc_ag; __constant__ unsigned dc_agents; __constant__ float dc_epsilon; __constant__ float dc_gamma; __constant__ float dc_lambda; __constant__ float dc_alpha; 
__constant__ unsigned dc_test_reps; __constant__ unsigned dc_test_max; __constant__ unsigned dc_restart_interval; //__constant__ float dc_share_best_pct; __constant__ float dc_copy_alpha_multiplier; // fixed pointers are stored in constant memory on the device //__constant__ unsigned *dc_seeds; //__constant__ float *dc_theta; //__constant__ float *dc_W; //__constant__ float *dc_s; //__constant__ unsigned *dc_action; //__constant__ float *fitness; const char * string_for_action(unsigned a) { return (a == 0) ? "LEFT" : ((a == 1) ? "NONE" : "RIGHT"); } #pragma mark - #pragma mark CPU & GPU DUAL_PREFIX float sigmoid(float in) { return 1.0f/(1.0f + expf(-in)); } DUAL_PREFIX unsigned iActionStart(unsigned a, unsigned stride, unsigned num_hidden) { unsigned i = (a * ((STATE_SIZE + 2) * num_hidden + 1)) * stride; return i; } //#define iActionStart(a, stride, num_hidden) (((a)*((STATE_SIZE+2)*NUM_HIDDEN + 1))*(stride)) // calculate the offset from the start of the weights for the bias weight for hidden node j DUAL_PREFIX unsigned offsetToHiddenBias(unsigned j, unsigned stride, unsigned num_hidden) { unsigned i = j*(1 + STATE_SIZE) * stride; return i; } // calculate the index for the bias weight for the output node DUAL_PREFIX unsigned offsetToOutputBias(unsigned stride, unsigned num_hidden) { unsigned i = num_hidden*(1 + STATE_SIZE) * stride; return i; } // Calculate the output of the neural net for specified state and action. // Hidden node activation values are stored in activation array and the output Q value is returned. 
DUAL_PREFIX float calc_Q(float *s, unsigned a, float *theta, unsigned stride, unsigned num_hidden, float *activation) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, stride, num_hidden); unsigned iOutputBias = offsetToOutputBias(stride, num_hidden); float result = 0.0f; // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, stride, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the contributions for the state input nodes for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k) * stride] * s[k * stride]; } activation[j * stride] = sigmoid(in); result += theta[iOutputBias + (1+j) * stride] * activation[j*stride]; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... ", s[0], s[stride], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } // different strides for state and theta // state has stride of LEARN_BLOCK_SIZE // theta stride is specified by argument DUAL_PREFIX float calc_Q2(float *s, unsigned a, float *theta, unsigned stride_theta, unsigned num_hidden, float *activation) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, stride_theta, num_hidden); float result = 0.0f; unsigned iOutputBias = offsetToOutputBias(stride_theta, num_hidden); // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, stride_theta, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the 
contributions for the state input nodes for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k) * stride_theta] * s[k * LEARN_BLOCK_SIZE]; } // apply sigmoid and accumulate in the result in = sigmoid(in); if (activation) activation[j*stride_theta] = in; result += theta[iOutputBias + (1+j) * stride_theta] * in; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... ", s[0], s[LEARN_BLOCK_SIZE], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } // add in the output bias contribution result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } // state array has stride of stride_s // theta array has stride of 1 DUAL_PREFIX float calc_Q3(float *s, unsigned a, float *theta, unsigned num_hidden, float *activation, unsigned stride_s) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, 1, num_hidden); float result = 0.0f; unsigned iOutputBias = offsetToOutputBias(1, num_hidden); // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, 1, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the contributions for the state input nodes #pragma unroll 2 for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k)] * s[k*stride_s]; } // apply sigmoid and accumulate in the result in = sigmoid(in); if (activation) activation[j] = in; result += theta[iOutputBias + (1+j)] * in; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... 
", s[0], s[stride_s], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } // add in the output bias contribution result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } DUAL_PREFIX void reset_gradient(float *W, unsigned stride, unsigned num_wgts) { for (int i = 0; i < num_wgts; i++) { W[i * stride] = 0.0f; } } DUAL_PREFIX void accumulate_gradient(float *s, unsigned a, float *theta, unsigned stride, unsigned num_hidden, unsigned num_wgts, float *activation, float *W, float lambda, float gamma) { // First, decay all the existing gradients by lambda * gamma #ifdef DEBUG_GRADIENT_CALC printf("all gradients after decay:\n"); #endif for (int i = 0; i < num_wgts; i++) { W[i*stride] *= lambda * gamma; #ifdef DEBUG_GRADIENT_CALC printf(" %9.6f\n", W[i*stride]); #endif } // Next, need to add in the new gradient for the specified action. // adjust W & theta to point to this action's weights unsigned offset = iActionStart(a, stride, num_hidden); // printf("[accumulate_gradient] offset is %d for action %d\n", offset, a); theta += offset; W += offset; #ifdef DEBUG_GRADIENT_CALC printf("updating gradients for action %d\n", a); #endif // for gradients to output node, the gradient equals the activation of the hidden layer node (or bias) // first update the gradient for bias -> output unsigned iOutBias = offsetToOutputBias(stride, num_hidden); W[iOutBias] += -1.0f; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] iOutBias is %d\n", iOutBias); printf("output bias changed by %9.6f and is now %9.6f\n", -1.0f, W[iOutBias]); #endif // next update the gradients with respect to weights from hidden to output for (int j = 0; j < num_hidden; j++) { // printf("[accumulate_gradient] iOutBias is %d\n", iOutBias); W[iOutBias + (1+j)*stride] += activation[j * stride]; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] hidden node %d is at %d\n", j, 
iOutBias + (1+j)*stride); printf("hidden%d to output changed by %9.6f and is now %9.6f\n", j, activation[j*stride], W[iOutBias + (1+j)*stride]); #endif } // update the gradients with respect to the weights from input to hidden for (int j = 0; j < num_hidden; j++) { // first the bias weight unsigned iHidBias = offsetToHiddenBias(j, stride, num_hidden); // gradient of output i wrt wgt from input k to hidden j equals // grad(in_j wrt wgt_kj) * grad(activation_j wrt in_j) * grad(output activation wrt activation_j) = // activation_k * activation_j * (1-activation_j) * wgt_ji // The last two terms are only a function of j (and there is only one output node), so // calculate grad to be the last two terms float grad = activation[j*stride] * (1-activation[j*stride]) * theta[iOutBias + (1+j)*stride]; // total gradient is the activation of the input node times grad // The updated value includes eligibility trace of prior gradient W[iHidBias] += -1.0f * grad; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] iHidBias is %d\n", iHidBias); printf("bias to hidden%d changed by %9.6f and is now %9.6f\n", j, -1.0f*grad, W[iHidBias]); #endif // next the states for (int k = 0; k < STATE_SIZE; k++) { W[iHidBias + (k+1)*stride] += s[k * stride] * grad; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] state %d is at %d\n", k, iHidBias + (k+1)*stride); printf("state%d to hidden%d changed by %9.6f and is now %9.6f\n", k, j, s[k*stride]*grad, W[iHidBias + (k+1)*stride]); #endif } } } DUAL_PREFIX void accumulate_gradient2(float *s, unsigned a, float *theta, float *activation, float *W) { // First, decay all the existing gradients by lambda * gamma for (int i = 0; i < NUM_WGTS; i++) { W[i*LEARN_BLOCK_SIZE] *= dc_lambda * dc_gamma; } // Next, need to add in the new gradient for the specified action. 
// adjust W & theta to point to this action's weights unsigned offset = iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); // theta += offset; // W += offset; theta += offset; W += offset; // for gradients to output node, the gradient equals the activation of the hidden layer node (or bias) // first update the gradient for bias -> output unsigned iOutBias = offsetToOutputBias(LEARN_BLOCK_SIZE, NUM_HIDDEN); W[iOutBias] += -1.0f; // next update the gradients with respect to weights from hidden to output for (int j = 0; j < NUM_HIDDEN; j++) { W[iOutBias + (1+j)*LEARN_BLOCK_SIZE] += activation[j * LEARN_BLOCK_SIZE]; } // update the gradients with respect to the weights from input to hidden for (int j = 0; j < NUM_HIDDEN; j++) { // first the bias weight unsigned iHidBias = offsetToHiddenBias(j, LEARN_BLOCK_SIZE, NUM_HIDDEN); // gradient of output i wrt wgt from input k to hidden j equals // grad(in_j wrt wgt_kj) * grad(activation_j wrt in_j) * grad(output activation wrt activation_j) = // activation_k * activation_j * (1-activation_j) * wgt_ji // The last two terms are only a function of j (and there is only one output node), so // calculate grad to be the last two terms float grad = activation[j*LEARN_BLOCK_SIZE] * (1-activation[j*LEARN_BLOCK_SIZE]) * theta[iOutBias + (1+j)*LEARN_BLOCK_SIZE]; // total gradient is the activation of the input node times grad // The updated value includes eligibility trace of prior gradient W[iHidBias] += -1.0f * grad; // next the states for (int k = 0; k < STATE_SIZE; k++) { W[iHidBias + (k+1)*LEARN_BLOCK_SIZE] += s[k * LEARN_BLOCK_SIZE] * grad; } } } // Update the weights in the neural net (theta's) using back-propagation of the output error // Current activation for the hidden layer is pre-calculated in activation DUAL_PREFIX void update_thetas(float *s, float *theta0, float *W0, float alpha, float error, unsigned stride, unsigned num_hidden, float *activation) { // Repeat for all actions for (int a = 0; a < NUM_ACTIONS; a++) { // adjust 
theta and W to point to start of weights/gradients for this action unsigned offset = iActionStart(a, stride, num_hidden); float *theta = theta0 + offset; float *W = W0 + offset; // First the bias // wgt_j_i += alpha * error * W_ji unsigned iOutBias = offsetToOutputBias(stride, num_hidden); theta[iOutBias] += alpha * error * W[iOutBias]; // if (isnan(theta[iOutBias])){ // printf("theta ISNAN !! added error of %9.6f with alpha of %9.6f\n", error, alpha); // } #ifdef DEBUG_THETA_UPDATE printf("\nupdate_thetas for error of %9.7f\n", error); printf("output bias: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", alpha, error, W[iOutBias], theta[iOutBias]); #endif // next update each weight from hidden nodes to output node for (int j = 0; j < num_hidden; j++) { // wgt_j_i += alpha * error * W_ji theta[iOutBias + (1+j) * stride] += alpha * error * W[iOutBias + (1+j)*stride]; #ifdef DEBUG_THETA_UPDATE printf("hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iOutBias + (1+j)*stride], theta[iOutBias + (1+j)*stride]); #endif } // update weights from input layer to hidden layer for each node in hidden layer for (int j = 0; j < num_hidden; j++) { // first update the bias weight // wgt_k_j = alpha * error * W_k_j unsigned iHidBias = offsetToHiddenBias(j, stride, num_hidden); theta[iHidBias] += alpha * error * W[iHidBias]; #ifdef DEBUG_THETA_UPDATE printf("bias -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iHidBias], theta[iHidBias]); #endif // update the weights from the state nodes for (int k = 0; k < STATE_SIZE; k++) { // wgt_k_j = alpha * error * W_k_j theta[iHidBias + (k+1) * stride] += alpha * error * W[iHidBias + (k+1)*stride]; #ifdef DEBUG_THETA_UPDATE printf("state%d -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", k, j, alpha, error, 
W[iHidBias + (k+1)*stride], theta[iHidBias + (k+1)*stride]); #endif } } } } // theta and activation have stride of LEARN_BLOCK_SIZE, W has stride based on value passed in (# agents) DUAL_PREFIX void update_thetas2(float *theta0, float *W0, float alpha, float error, float *activation) { // Repeat for all actions for (int a = 0; a < NUM_ACTIONS; a++) { // adjust theta and W to point to start of weights/gradients for this action float *theta = theta0 + iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); float *W = W0 + iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); // First the bias // wgt_j_i += alpha * error * W_ji unsigned iOutBias = offsetToOutputBias(LEARN_BLOCK_SIZE, NUM_HIDDEN); theta[iOutBias] += alpha * error * W[iOutBias]; // if (isnan(theta[iOutBias])){ // printf("theta ISNAN !! added error of %9.6f with alpha of %9.6f\n", error, alpha); // } #ifdef DEBUG_THETA_UPDATE printf("\nupdate_thetas for error of %9.7f\n", error); printf("output bias: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", alpha, error, W[iOutBias], theta[iOutBias]); #endif // next update each weight from hidden nodes to output node for (int j = 0; j < NUM_HIDDEN; j++) { // wgt_j_i += alpha * error * W_ji theta[iOutBias + (1+j) * LEARN_BLOCK_SIZE] += alpha * error * W[iOutBias + (1+j)*LEARN_BLOCK_SIZE]; #ifdef DEBUG_THETA_UPDATE printf("hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iOutBias + (1+j)*LEARN_BLOCK_SIZE], theta[iOutBias + (1+j)*LEARN_BLOCK_SIZE]); #endif } // update weights from input layer to hidden layer for each node in hidden layer for (int j = 0; j < NUM_HIDDEN; j++) { // first update the bias weight // wgt_k_j = alpha * error * W_k_j unsigned iHidBias = offsetToHiddenBias(j, LEARN_BLOCK_SIZE, NUM_HIDDEN); theta[iHidBias] += alpha * error * W[iHidBias]; #ifdef DEBUG_THETA_UPDATE printf("bias -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to 
get new value of %9.7f\n", j, alpha, error, W[iHidBias], theta[iHidBias]); #endif // update the weights from the state nodes for (int k = 0; k < STATE_SIZE; k++) { // wgt_k_j = alpha * error * W_k_j theta[iHidBias + (k+1) * LEARN_BLOCK_SIZE] += alpha * error * W[iHidBias + (k+1)*LEARN_BLOCK_SIZE]; #ifdef DEBUG_THETA_UPDATE printf("state%d -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", k, j, alpha, error, W[iHidBias + (k+1)*LEARN_BLOCK_SIZE], theta[iHidBias + (k+1)*LEARN_BLOCK_SIZE]); #endif } } } } //DUAL_PREFIX void update_thetas2(float *s, float *theta0, float *W0, float alpha, float error, unsigned stride_s, unsigned stride_g, unsigned num_hidden, float *activation) //{ // // Repeat for all actions // for (int a = 0; a < NUM_ACTIONS; a++) { // // adjust theta and W to point to start of weights/gradients for this action // unsigned offset = iActionStart(a, stride_g, num_hidden); // float *theta = theta0 + offset; // float *W = W0 + offset; // // // First the bias // unsigned iOutBias = offsetToOutputBias(stride_g, num_hidden); // theta[iOutBias] += alpha * error * W[iOutBias]; // // // next update each weight from hidden nodes to output node // for (int j = 0; j < num_hidden; j++) { // // wgt_j_i += alpha * error * W_ji // theta[iOutBias + (1+j) * stride_g] += alpha * error * W[iOutBias + (1+j)*stride_g]; // } // // // update weights from input layer to hidden layer for each node in hidden layer // for (int j = 0; j < num_hidden; j++) { // // first update the bias weight // // wgt_k_j = alpha * error * W_k_j // unsigned iHidBias = offsetToHiddenBias(j, stride_g, num_hidden); // theta[iHidBias] += alpha * error * W[iHidBias]; // // // update the weights from the state nodes // for (int k = 0; k < STATE_SIZE; k++) { // // wgt_k_j = alpha * error * W_k_j // theta[iHidBias + (k+1) * stride_g] += alpha * error * W[iHidBias + (k+1)*stride_g]; // } // } // } //} // Calculate the Q value for each action from the 
given state, returning the best Q value // and storing the action in *pAction DUAL_PREFIX float best_action(float *s, unsigned *pAction, float *theta, unsigned stride, unsigned num_hidden, float *activation) { // calculate Q value for each action unsigned best_action = 0; float bestQ = calc_Q(s, 0, theta, stride, num_hidden, activation); for (int k = 1; k < NUM_ACTIONS; k++) { float tempQ = calc_Q(s, k, theta, stride, num_hidden, activation); if (tempQ > bestQ) { bestQ = tempQ; best_action = k; } } *pAction = best_action; return bestQ; } DUAL_PREFIX float best_action2(float *s, unsigned *pAction, float *theta, unsigned stride_g, unsigned num_hidden, float *activation) { // calculate Q value for each action unsigned best_action = 0; float bestQ = calc_Q2(s, 0, theta, stride_g, num_hidden, activation); for (int k = 1; k < NUM_ACTIONS; k++) { float tempQ = calc_Q2(s, k, theta, stride_g, num_hidden, activation); if (tempQ > bestQ) { bestQ = tempQ; best_action = k; } } *pAction = best_action; return bestQ; } // theta has stride of 1 and state has stride of stride_s DUAL_PREFIX float best_action3(float *s, unsigned *pAction, float *theta, unsigned num_hidden, float *activation, unsigned stride_s) { // calculate Q value for each action unsigned best_action = 0; float bestQ = calc_Q3(s, 0, theta, num_hidden, activation, stride_s); for (int k = 1; k < NUM_ACTIONS; k++) { float tempQ = calc_Q3(s, k, theta, num_hidden, activation, stride_s); if (tempQ > bestQ) { bestQ = tempQ; best_action = k; } } *pAction = best_action; return bestQ; } // choose action from current state, return the Q value for the chosen action // and store the action in *pAction DUAL_PREFIX float choose_action(float *s, unsigned *pAction, float *theta, float epsilon, unsigned stride, unsigned num_hidden, float *activation, unsigned *seeds) { if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){ // choose random action float r = RandUniform(seeds, stride); *pAction = (unsigned)(r * NUM_ACTIONS); 
return calc_Q(s, *pAction, theta, stride, num_hidden, activation); }else{ // choose the best action return best_action(s, pAction, theta, stride, num_hidden, activation); } } DUAL_PREFIX float choose_action2(float *s, unsigned *pAction, float *theta, float *activation, unsigned *seeds) { unsigned stride_s = LEARN_BLOCK_SIZE; if (dc_epsilon > 0.0f && RandUniform(seeds, stride_s) < dc_epsilon){ // choose random action float r = RandUniform(seeds, stride_s); *pAction = (unsigned)(r * NUM_ACTIONS); return calc_Q2(s, *pAction, theta, LEARN_BLOCK_SIZE, NUM_HIDDEN, activation); }else{ // choose the best action return best_action2(s, pAction, theta, LEARN_BLOCK_SIZE, NUM_HIDDEN, activation); } } //__device__ float choose_action3(unsigned idx, unsigned *s_u, float *s_f) //{ // if (dc_epsilon > 0.0f && RandUniform(s_u+idx, LEARN_BLOCK_SIZE) < dc_epsilon){ // // choose random action // float r = RandUniform(s_u+idx, LEARN_BLOCK_SIZE); // *pAction = (unsigned)(r * NUM_ACTIONS); // return calc_Q4(idx, s_u, s_f); // }else{ // // choose the best action // return best_action3(idx, s_u, s_f); // } //} //DUAL_PREFIX float choose_action2(float *s, unsigned *pAction, float *theta, float epsilon, unsigned stride_g, unsigned num_hidden, float *activation, unsigned *seeds) //{ // if (epsilon > 0.0f && RandUniform(seeds, LEARN_BLOCK_SIZE) < epsilon){ // // choose random action // float r = RandUniform(seeds, LEARN_BLOCK_SIZE); // *pAction = r * NUM_ACTIONS; // return calc_Q2(s, *pAction, theta, stride_g, num_hidden, activation); // }else{ // // choose the best action // return best_action2(s, pAction, theta, stride_g, num_hidden, activation); // } //} DUAL_PREFIX unsigned terminal_state(float *s) { return s[0] >= MAX_X; } // take an action from the current state, s, returning the reward and saving new state in s_prime // Note, s & s_prime may be the same location. 
DUAL_PREFIX float take_action(float *s, unsigned a, float *s_prime, unsigned stride, float *accel) { // Forumlation of mountain car problem is from Sutton & Barto, // "Reinforcement Learning, An Introduction" #ifdef DEBUG_CPU printf("take_action %s from state (%9.4f, %9.4f)\n", string_for_action(a), s[0], s[stride]); #endif // normal reward is -1.0f per time step float reward = -1.0f; // update velocity and limit it to within bounds s_prime[stride] = s[stride] + accel[a] + GRAVITY_FACTOR * cosf(GRAVITY_X_SCALE * s[0]); #ifdef DEBUG_CPU printf("accel is %9.6f from force and %9.6f from gravity resulting in new velocity of %9.6f\n", accel[a], GRAVITY_FACTOR * cosf(GRAVITY_X_SCALE * s[0]), s_prime[stride]); #endif if (s_prime[stride] < MIN_VEL) s_prime[stride] = MIN_VEL; if (s_prime[stride] > MAX_VEL) s_prime[stride] = MAX_VEL; // update position and test for success and limit with minimum bound s_prime[0] = s[0] + s_prime[stride]; if (s_prime[0] >= MAX_X) reward = 0.0f; if (s_prime[0] <= MIN_X) { s_prime[0] = MIN_X; s_prime[stride] = 0.0f;} #ifdef DEBUG_CPU printf("new state is (%9.6f, %9.6f) and reward is %9.6f\n", s_prime[0], s_prime[stride], reward); #endif return reward; } // random number in the specified range DUAL_PREFIX float rand_in_range(unsigned *seeds, unsigned stride, float min, float max) { float r = min + (max-min)*RandUniform(seeds, stride); return r; } // randomize the position and velocity uniformly over their range DUAL_PREFIX void randomize_state(float *s, unsigned *seeds, unsigned stride) { s[0] = rand_in_range(seeds, stride, MIN_X, MAX_X); s[stride] = rand_in_range(seeds, stride, MIN_VEL, MAX_VEL); // s[0] = MIN_X + (MAX_X-MIN_X)*RandUniform(seeds, stride); // s[stride] = MIN_VEL + (MAX_VEL-MIN_VEL)*RandUniform(seeds, stride); } //DUAL_PREFIX void randomize_state2(float *s, unsigned *seeds, unsigned stride_s, unsigned stride_g) //{ // s[0] = rand_in_range(seeds, stride_g, MIN_X, MAX_X); // s[stride_s] = rand_in_range(seeds, stride_g, MIN_VEL, 
MAX_VEL); //} //__device__ void randomize_stateGPU(unsigned ag) //{ // dc_ag.s[ag] = rand_in_range(dc_ag.seeds + ag, dc_p.stride, MIN_X, MAX_X); // dc_ag.s[ag + dc_p.stride] = rand_in_range(dc_ag.seeds + ag, dc_p.stride, MIN_VEL, MAX_VEL); //} // void randomize_all_states(AGENT_DATA *ag) { // randomize state for all agents, deterine first action and set activation values for hidden for (int agent = 0; agent < _p.agents; agent++) { randomize_state(ag->s + agent, ag->seeds + agent, _p.agents); reset_gradient(ag->W + agent, _p.agents, NUM_WGTS); // printf("randomize_state, state is now (%9.6f, %9.6f)\n", ag->s[agent], ag->s[agent + _p.agents]); choose_action(ag->s + agent, ag->action + agent, ag->theta + agent, _p.epsilon, _p.agents, NUM_HIDDEN, ag->activation + agent, ag->seeds + agent); // force activation values to be recalculated for the chosen action // printf("chosen action will be %s\n", string_for_action(ag->action[agent])); calc_Q(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, ag->activation + agent); // update_trace(... 
} } #pragma mark - #pragma mark CPU void set_params(PARAMS p){ _p = p; // fill in some calculated values in the parameters _p.iActionStart[0] = iActionStart(0, p.agents, NUM_HIDDEN); _p.iActionStart[1] = iActionStart(1, p.agents, NUM_HIDDEN); _p.iActionStart[2] = iActionStart(2, p.agents, NUM_HIDDEN); // printf("iActionStart values %d, %d, %d\n", _p.iActionStart[0], _p.iActionStart[1], _p.iActionStart[2]); _p.offsetToOutputBias = offsetToOutputBias(p.agents, NUM_HIDDEN); // printf("_p.agents = %d, _p.hidden_nodes = %d\n", _p.agents, _p.hidden_nodes); } // dump agent data to stdout // uses parameter values in _p // (hard-coded to 2 dimensional state) void dump_agent(AGENT_DATA *ag, unsigned agent, unsigned crude) { printf("[agent %d]: ", agent); printf(" seeds = %u, %u, %u, %u\n", ag->seeds[agent], ag->seeds[agent + _p.agents], ag->seeds[agent + 2*_p.agents], ag->seeds[agent + 3*_p.agents]); printf(" FROM TO THETA W \n"); unsigned i = agent; for (int a = 0; a < NUM_ACTIONS; a++) { for (int h = 0; h < NUM_HIDDEN; h++) { printf("[%6s] bias --> hidden%2d %9.6f %9.6f\n", string_for_action(a), h, ag->theta[i], ag->W[i]); i += _p.agents; printf(" x --> hidden%2d %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; printf(" x' --> hidden%2d %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; } printf( " bias --> output %9.6f %9.6f\n", ag->theta[i], ag->W[i]); i += _p.agents; for (int h = 0; h < NUM_HIDDEN; h++) { printf(" hidden%2d --> output %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; } } printf("fitness = %5.3f alpha = %7.4f\n", ag->fitness[agent]/(crude ? 
CRUDE_NUM_TOT_DIV : NUM_TOT_DIV), ag->alpha[agent]); printf("\nCurrent State: x = %9.6f x' = %9.6f, stored action is %s\n", ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent])); printf(" HIDDEN NODE ACTIVATION\n"); for (int j = 0; j < NUM_HIDDEN; j++) { printf("[%6s] %3d %9.6f\n", string_for_action(ag->action[agent]), j, ag->activation[agent + j * _p.agents]); } printf("\n"); } void dump_agent_pointers(const char *str, AGENT_DATA *ag) { printf("\n===================================================\n%s\n", str); printf("---------------------------------------------------\n", str); printf(" seeds: %p\n", ag->seeds); printf(" theta: %p\n", ag->theta); printf(" W: %p\n", ag->W); printf(" state: %p\n", ag->s); printf("activation: %p\n", ag->activation); printf(" action: %p\n", ag->action); printf(" fitness: %p\n", ag->fitness); printf("====================================================\n\n", str); } // print message and dump all agent data void dump_agents(const char *str, AGENT_DATA *ag, unsigned crude) { printf("\n===================================================\n%s\n", str); printf("---------------------------------------------------\n", str); for (int agent = 0; agent < _p.agents; agent++) { dump_agent(ag, agent, crude); } printf("====================================================\n\n", str); } void dump_one_agent(const char *str, AGENT_DATA *ag, unsigned crude) { printf("%s\n", str); dump_agent(ag, 0, crude); } RESULTS *initialize_results() { RESULTS *r = (RESULTS *)malloc(sizeof(RESULTS)); r->avg_fitness = (float *)malloc(_p.num_tests * sizeof(float)); r->best_fitness = (float *)malloc(_p.num_tests * sizeof(float)); r->best_agent = (unsigned *)malloc(_p.num_tests * sizeof(unsigned)); return r; } void free_results(RESULTS *r) { if (r){ if (r->avg_fitness){ free(r->avg_fitness); r->avg_fitness = NULL;} if (r->best_fitness){ free(r->best_fitness); r->best_fitness = NULL;} if (r->best_agent){ free(r->best_agent); r->best_agent = NULL;} 
free(r); } } void display_results(const char *str, RESULTS *r) { printf("%s \n", str); printf(" TEST Avg Steps\n"); for (int i = 0; i < _p.num_tests; i++) { printf(" [%10d]%8.0f, %8.0f, %8d\n", i*_p.test_interval, r->avg_fitness[i], r->best_fitness[i], r->best_agent[i]); } } // generate random seeds for the sepecified number of agents unsigned *create_seeds(unsigned num_agents) { #ifdef VERBOSE printf("create_seeds for %d agents\n", num_agents); #endif unsigned *seeds = (unsigned *)malloc(num_agents * SEEDS_PER_AGENT * sizeof(unsigned)); for (int i = 0; i < num_agents * SEEDS_PER_AGENT; i++) { seeds[i] = RandUniformui(g_seeds, 1); } return seeds; } // create wgts set initially to random values between theta_min and theta_max float *create_theta(unsigned num_agents, unsigned num_wgts, float theta_min, float theta_max) { #ifdef VERBOSE printf("create_theta for %d agents and %d weights in range %9.7f to %9.7f\n", num_agents, num_wgts, theta_min, theta_max); #endif float *theta = (float *)malloc(num_agents * num_wgts * sizeof(float)); for (int i = 0; i < num_agents * num_wgts; i++) { theta[i] = rand_in_range(g_seeds, 1, theta_min, theta_max); } return theta; } // create gradient trace set initially to 0.0f float *create_W(unsigned num_agents, unsigned num_wgts) { #ifdef VERBOSE printf("create_W for %d agents and %d weights\n", num_agents, num_wgts); #endif float *W = (float *)malloc(num_agents * num_wgts * sizeof(float)); for (int i = 0; i < num_agents * num_wgts; i++) W[i] = 0.0f; return W; } // create initial random states float *create_states(unsigned num_agents, unsigned state_size, unsigned *seeds) { #ifdef VERBOSE printf("create_states for %d agents and state size of %d\n", num_agents, state_size); #endif float *states = (float *)malloc(num_agents * state_size * sizeof(float)); for (int i = 0; i < num_agents * state_size; i++) states[i] = 0.0f; return states; } unsigned *create_actions(unsigned num_agents, unsigned num_actions) { #ifdef VERBOSE 
printf("create_actions for %d agents\n", num_agents); #endif unsigned *actions = (unsigned *)malloc(num_agents * num_actions * sizeof(unsigned)); for (int i = 0; i < num_agents * num_actions; i++) actions[i] = num_actions; // not valid value return actions; } float *create_fitness(unsigned num_agents) { float *fitness = (float *)malloc(num_agents * sizeof(float)); for (int i = 0; i < num_agents; i++) fitness[i] = MAX_FITNESS; return fitness; } float *create_alpha(unsigned num_agents) { float *alpha = (float *)malloc(num_agents * sizeof(float)); for (int i = 0; i < num_agents; i++) alpha[i] = _p.alpha; return alpha; } float *create_activation(unsigned num_agents, unsigned num_hidden) { #ifdef VERBOSE printf("create_activation for %d agents wiht %d hidden nodes\n", num_agents, num_hidden); #endif float *activation = (float *)malloc(num_agents * (num_hidden) * sizeof(float)); for (int i = 0; i < num_agents * num_hidden; i++) activation[i] = 0.0f; return activation; } // initialize agents on CPU, including the initial randomization of state and choice of first action AGENT_DATA *initialize_agentsCPU() { #ifdef VERBOSE printf("initializing agents on CPU...\n"); #endif AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA)); ag->seeds = create_seeds(_p.agents); ag->theta = create_theta(_p.agents, NUM_WGTS, _p.initial_theta_min, _p.initial_theta_max); ag->W = create_W(_p.agents, NUM_WGTS); ag->s = create_states(_p.agents, STATE_SIZE, ag->seeds); ag->action = create_actions(_p.agents, NUM_ACTIONS); ag->activation = create_activation(_p.agents, NUM_HIDDEN); ag->fitness = create_fitness(_p.agents); ag->alpha = create_alpha(_p.agents); randomize_all_states(ag); return ag; } void free_agentsCPU(AGENT_DATA *ag) { #ifdef VERBOSE printf("freeing agents on CPU...\n"); #endif if (ag) { if (ag->seeds){ free(ag->seeds); ag->seeds = NULL;} if (ag->theta){ free(ag->theta); ag->theta = NULL;} if (ag->W){ free(ag->W); ag->W = NULL;} if (ag->s){ free(ag->s); ag->s = NULL;} if 
(ag->action){ free(ag->action); ag->action = NULL;}
        if (ag->activation){ free(ag->activation); ag->activation = NULL;}
        if (ag->fitness){ free(ag->fitness); ag->fitness = NULL;}
        if (ag->alpha) {free(ag->alpha); ag->alpha = NULL;}
        free(ag);
    }
}

/*
    On entry, the agent data has the current state and chosen action based on current weights.
*/
// Run one learning chunk of _p.chunk_interval time steps for every agent on the CPU:
// evaluate Q for the current (state, action), accumulate the eligibility-trace gradient,
// take the action, choose the next action, and update the weights from the TD error.
void learning_session(AGENT_DATA *ag)
{
    // for each time step
    for (int t = 0; t < _p.chunk_interval; t++) {
#ifdef VERBOSE
        printf("\n*****************************************\n");
        printf( "************ TIME STEP %d ****************\n", t);
        printf( "*****************************************\n");
#endif
        // for each agent
        for (int agent = 0; agent < _p.agents; agent++) {
#ifdef DEBUG_CPU
            printf("[[ AGENT %d ]]\n", agent);
#endif
            // Calculate Q_curr based on current state and action
            // Activation values will be stored for use in updating the gradient
            float Q_curr = calc_Q(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, ag->activation + agent);
#ifdef DEBUG_CPU
            printf("Q_curr is %9.6f based on state (%9.6f, %9.6f) and action %s\n", Q_curr, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent]));
#endif
            // accumulate_gradient uses current activations and weights to update the gradient array, W
            accumulate_gradient(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, NUM_WGTS, ag->activation + agent, ag->W + agent, _p.lambda, _p.gamma);

//#ifdef DUMP_AGENT_UPDATES
//            dump_agents("after accumulate_gradient", ag);
//#endif

            // take_action will calculate the new state based on the current state and current action,
            // storing the new state in the agent, returning the reward
            float reward = take_action(ag->s + agent, ag->action[agent], ag->s + agent, _p.agents, accel);
#ifdef DUMP_STATES
            printf("[AGENT%3d] x = %9.6f x' = %9.6f after action = %s\n", agent, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent]));
#endif
            // restart the episode from a random state when the terminal state was reached
            unsigned success = terminal_state(ag->s + agent);
            if (success){
//                printf("success for ageent %d at time step %d\n", agent, t);
                randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
            }
            // choose the next action, storing it in the agent and returning the Q_next value
            float Q_next = choose_action(ag->s + agent, ag->action + agent, ag->theta + agent, _p.epsilon, _p.agents, NUM_HIDDEN, ag->activation + agent, ag->seeds + agent);
#ifdef DEBUG_CPU
            printf("Q_next is %12.6f based on state (%9.6f, %9.6f) and action %s\n", Q_next, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent]));
#endif
            // TD error: one-step target (reward + discounted Q_next) minus current estimate
            float error = reward + _p.gamma*Q_next - Q_curr;
//            printf("reward + _p.gamma*Q_next = %9.6f, (Q_next is %9.6f), Q_curr = %9.6f, so error is %9.6f\n", reward + _p.gamma*Q_next, Q_next, Q_curr, error);
#ifdef DEBUG_CPU
            printf("error is %12.6f\n", error);
#endif
            update_thetas(ag->s + agent, ag->theta + agent, ag->W + agent, _p.alpha, error, _p.agents, NUM_HIDDEN, ag->activation + agent);
//#ifdef DUMP_AGENT_UPDATES
//            dump_agents("after update_thetas", ag);
//#endif
            // traces are cleared at episode end so credit does not leak across episodes
            if (success) reset_gradient(ag->W + agent, _p.agents, NUM_WGTS);
        }
#ifdef DUMP_AGENT_UPDATES
        printf("***** end of time step %d *****\n", t);
        dump_agents("after update_thetas", ag);
#endif
//        update_stored_Q(ag->Q + agent, ag->s + agent, ag->theta + agent, _p.agents, STATE_SIZE, NUM_ACTIONS, NUM_HIDDEN, ag->activation + agent);
//        update_trace(...
    }
}

// copy theta values from agent iFrom and over-write agent iTo
void copy_theta(AGENT_DATA *ag, unsigned iFrom, unsigned iTo, unsigned num_wgts, unsigned stride)
{
    for (int i = 0; i < num_wgts; i++) {
        ag->theta[iTo + i * stride] = ag->theta[iFrom + i*stride];
    }
}

// share is where the best agents will be selected and duplicated
//void share(AGENT_DATA *ag, float share_best_pct, unsigned agent_group_size, unsigned num_agents, unsigned num_wgts)
//{
//    printf("share...\n");
//    for (int group = 0; group < num_agents / agent_group_size; group++) {
//        unsigned iGroup = group * agent_group_size;
//        // determine the best agent in this group
//        unsigned iBest = 0;
//        float best_fitness = ag->fitness[iGroup];
//        for (int a = 1; a < agent_group_size; a++) {
//            if (ag->fitness[iGroup + a] < best_fitness) {
//                best_fitness = ag->fitness[iGroup + a];
//                iBest = a;
//            }
//        }
////        printf("agent %d is the best in group %d\n", iGroup + iBest, group);
//
//        // now copy the best agents to the others with probability share_best_pct
//        for (int a = 0; a < agent_group_size; a++) {
//            if (a == iBest) continue;
//            float r = RandUniform(ag->seeds + iGroup + a, num_agents);
//            if (r < share_best_pct) {
//                printf("copy weights from agent %d to agent %d\n", iGroup + iBest, iGroup + a);
//                copy_theta(ag, iBest, iGroup + a, num_wgts, num_agents);
//            }
//        }
//    }
//}

// test the agents and store the results in the iTest entry in the RESULTS arrays
void run_test(AGENT_DATA *ag, unsigned iTest)
{
    float total_steps = 0.0f;
    float best_fitness = MAX_FITNESS;
    float save_s[STATE_SIZE];
    unsigned save_action;    //**TODO** may not need to be saved
    unsigned save_seeds[4];
    // scratch activation buffer shared by all tests; allocated once and intentionally kept
    static float *junk_activation = NULL;
    if(!junk_activation) junk_activation = (float *)malloc(NUM_HIDDEN * sizeof(float));

    // test all agents and average the result
    for (int agent = 0; agent < _p.agents; agent++) {
#ifdef TRACE_TEST
        printf("Testing agent %d...\n", agent);
#endif
        // save agent state prior to testing
        save_s[0] =
ag->s[agent];
        save_s[1] = ag->s[agent + _p.agents];
        save_action = ag->action[agent];
        save_seeds[0] = ag->seeds[agent];
        save_seeds[1] = ag->seeds[agent + _p.agents];
        save_seeds[2] = ag->seeds[agent + 2*_p.agents];
        save_seeds[3] = ag->seeds[agent + 3*_p.agents];

        float agent_steps = 0.0f;
        // run test_reps independent greedy episodes from randomized starting states
        for (int rep = 0; rep < _p.test_reps; rep++) {
            // re-seed deterministically per rep so each rep runs a distinct episode
            ag->seeds[agent] = save_seeds[0] + rep;
            ag->seeds[agent + _p.agents] = save_seeds[1] + rep;
            ag->seeds[agent + 2*_p.agents] = save_seeds[2] + rep;
            ag->seeds[agent + 3*_p.agents] = save_seeds[3] + rep;
            randomize_state(ag->s + agent, ag->seeds + agent, _p.agents);
            int t;
            unsigned action;
            for (t = 0; t < _p.test_max; t++) {
                best_action(ag->s + agent, &action, ag->theta + agent, _p.agents, NUM_HIDDEN, junk_activation);
#ifdef TRACE_TEST
                printf("[test%4d] state = (%9.6f, %9.6f) action will be %s\n", t, ag->s[agent], ag->s[agent + _p.agents], string_for_action(action));
#endif
                take_action(ag->s + agent, action, ag->s + agent, _p.agents, accel);
                if (terminal_state(ag->s + agent)) {
#ifdef TRACE_TEST
                    printf("Done at step %d!!!\n", t);
#endif
                    break;
                }
            }
#ifdef TRACE_TEST
            // BUG FIX: the episode loop runs to _p.test_max (not _p.test_reps),
            // so a non-terminating episode is detected by t == _p.test_max
            if (t == _p.test_max) printf("failure\n");
#endif
            agent_steps += t;
        }
        // fitness = average steps to reach the terminal state (lower is better)
        ag->fitness[agent] = agent_steps / _p.test_reps;
        if (ag->fitness[agent] < best_fitness){
            best_fitness = ag->fitness[agent];
//            best_agent = agent;
        }
        total_steps += agent_steps;

        //restore state and action
        ag->s[agent] = save_s[0];
        ag->s[agent + _p.agents] = save_s[1];
        ag->action[agent] = save_action;
        ag->seeds[agent] = save_seeds[0];
        ag->seeds[agent + _p.agents] = save_seeds[1];
        ag->seeds[agent + 2*_p.agents] = save_seeds[2];
        ag->seeds[agent + 3*_p.agents] = save_seeds[3];
    }
#ifdef DUMP_TESTED_AGENTS
    printf("Testing %d\n", iTest);
    dump_agents("after testing", ag);
#endif

//    r->avg_fitness[iTest] = total_steps / float(_p.agents) / float(_p.test_reps);
//    r->best_fitness[iTest] = best_fitness;
//    r->best_agent[iTest] = best_agent;
}

// Top-level CPU learning loop: alternate learning chunks with periodic restarts and tests.
void run_CPU(AGENT_DATA *ag)
{
#ifdef VERBOSE
    printf("\n==============================================\nrunning on CPU...\n");
#endif

//    dump_agents("run_CPU entry", ag);

    unsigned timer;
    CREATE_TIMER(&timer);
    START_TIMER(timer);
    timing_feedback_header(_p.num_chunks);
    for (int i = 0; i < _p.num_chunks; i++) {
        timing_feedback_dot(i);
        // periodically restart all agents from fresh random states
        if ((i > 0) && 0 == (i % _p.chunks_per_restart)){
//            printf("randomize all states...\n");
            randomize_all_states(ag);
#ifdef DUMP_AGENTS_AFTER_RESTART
            dump_agents("after restart", ag);
#endif
        }
        if (i == 0) {
#ifdef DUMP_INITIAL_AGENTS
            dump_agents("Initial agents on CPU, prior to learning session", ag);
#endif
//            run_test(ag, r, i);
        }
        learning_session(ag);
//        dump_agents("after learning session", ag);
        // evaluate agents at the configured testing cadence
        if (0 == ((i+1) % _p.chunks_per_test)) run_test(ag, (i+1)/_p.chunks_per_test);
//        dump_agents("after testing", ag);
//        if ((_p.agent_group_size > 1) && 0 == ((i+1) % _p.chunks_per_share)) {
//            share(ag, _p.share_best_pct, _p.agent_group_size, _p.agents, _p.num_wgts);
//        }
    }
    printf("\n");
    STOP_TIMER(timer, "run on CPU");

//#ifdef DUMP_FINAL_AGENTS
//    dump_agents("Final agents on CPU", ag);
//#endif
}

#pragma mark -
#pragma mark GPU

// copy agents from device back to host
// NOTE(review): the returned struct and its member arrays are heap-allocated by the
// host_copy* helpers; callers that free() only the struct (see dump_agentsGPU) leak
// the member arrays — TODO confirm host_copy* allocation and add a matching cleanup.
AGENT_DATA *copy_GPU_agents(AGENT_DATA *agGPU)
{
//    printf("copy_GPU_agents\n");
    AGENT_DATA *agCopy = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
//    dump_agent_pointers("agGPU", agGPU);
//    printf(" %d seeds from %p\n", _p.agents * 4, agGPU->seeds);
    agCopy->seeds = host_copyui(agGPU->seeds, _p.agents * 4);
    agCopy->theta = host_copyf(agGPU->theta, _p.agents * NUM_WGTS);
    agCopy->W = host_copyf(agGPU->W, _p.agents * NUM_WGTS);
    agCopy->s = host_copyf(agGPU->s, _p.agents * STATE_SIZE);
    agCopy->activation = host_copyf(agGPU->activation, _p.agents * NUM_HIDDEN);
    agCopy->action = host_copyui(agGPU->action, _p.agents);
    agCopy->fitness = host_copyf(agGPU->fitness, _p.agents);
    agCopy->alpha = host_copyf(agGPU->alpha, _p.agents);
    return agCopy;
}

// dump the agents from the GPU by first copying to CPU and then dumping the CPU copy
void
dump_agentsGPU(const char *str, AGENT_DATA *agGPU, unsigned crude)
{
    AGENT_DATA *agCopy = copy_GPU_agents(agGPU);
    dump_agents(str, agCopy, crude);
    free(agCopy);
}

// copy all agents back from the GPU, then dump only agent number `ag`
void dump_one_agentGPU(const char *str, AGENT_DATA *agGPU, unsigned ag, unsigned crude)
{
    AGENT_DATA *agCopy = copy_GPU_agents(agGPU);
    printf("%s\n", str);
    dump_agent(agCopy, ag, crude);
    free(agCopy);
}

// Copy the provided CPU agent data to the GPU, storing device pointers in a new AGENT_DATA structure
// Also copy the AGENT_DATA and PARAMS structures to constant memory on the device
AGENT_DATA *initialize_agentsGPU(AGENT_DATA *agCPU)
{
#ifdef VERBOSE
    printf("\n==============================================\nrunning on GPU...\n");
#endif
#ifdef VERBOSE
    printf("initializing agents on GPU...\n");
#endif
    AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
    ag->seeds = device_copyui(agCPU->seeds, _p.agents * 4);
    ag->theta = device_copyf(agCPU->theta, _p.agents * NUM_WGTS);
    ag->W = device_copyf(agCPU->W, _p.agents * NUM_WGTS);
    ag->s = device_copyf(agCPU->s, _p.agents * STATE_SIZE);
    ag->activation = device_copyf(agCPU->activation, _p.agents * NUM_HIDDEN);
    ag->action = device_copyui(agCPU->action, _p.agents);
    ag->fitness = device_copyf(agCPU->fitness, _p.agents);
    ag->alpha = device_copyf(agCPU->alpha, _p.agents);
    ag->alphaOn = device_alloc_filledui(_p.agents, 1);

    // copy frequently-used scalar parameters into device constant memory
    // NOTE(review): passing the symbol by string name is the legacy form of
    // hipMemcpyToSymbol/cudaMemcpyToSymbol; modern toolkits expect the symbol
    // itself (e.g. HIP_SYMBOL(dc_agents)) — verify against the toolkit in use.
//    hipMemcpyToSymbol("dc_p", &_p, sizeof(PARAMS));
    hipMemcpyToSymbol("dc_agents", &_p.agents, sizeof(unsigned));
    hipMemcpyToSymbol("dc_epsilon", &_p.epsilon, sizeof(float));
    hipMemcpyToSymbol("dc_gamma", &_p.gamma, sizeof(float));
    hipMemcpyToSymbol("dc_lambda", &_p.lambda, sizeof(float));
    hipMemcpyToSymbol("dc_alpha", &_p.alpha, sizeof(float));
    hipMemcpyToSymbol("dc_test_reps", &_p.test_reps, sizeof(unsigned));
    hipMemcpyToSymbol("dc_test_max", &_p.test_max, sizeof(unsigned));
    hipMemcpyToSymbol("dc_restart_interval", &_p.restart_interval, sizeof(unsigned));
//    hipMemcpyToSymbol("dc_share_best_pct", &_p.share_best_pct, sizeof(unsigned));
hipMemcpyToSymbol("dc_copy_alpha_multiplier", &_p.copy_alpha_multiplier, sizeof(unsigned)); hipMemcpyToSymbol("dc_ag", ag, sizeof(AGENT_DATA)); hipMemcpyToSymbol("dc_accel", accel, 3 * sizeof(float)); // dump_agent_pointers("agent copied to GPU", ag); return ag; } // Free the deivce memory pointed to by elements of AGENT_DATA ag, then free ag void free_agentsGPU(AGENT_DATA *ag) { #ifdef VERBOSE printf("freeing agents on GPU...\n"); #endif if (ag) { if (ag->seeds){ deviceFree(ag->seeds); ag->seeds = NULL;} if (ag->theta){ deviceFree(ag->theta); ag->theta = NULL;} if (ag->W){ deviceFree(ag->W); ag->W = NULL;} if (ag->s){ deviceFree(ag->s); ag->s = NULL;} if (ag->activation){ deviceFree(ag->activation); ag->activation = NULL;} if (ag->action){ deviceFree(ag->action); ag->action = NULL;} if (ag->fitness){ deviceFree(ag->fitness); ag->fitness = NULL;} if (ag->alpha){ deviceFree(ag->alpha); ag->alpha = NULL;} free(ag); } } //__global__ void kernel_randomize_all_states() //{ //} __global__ void reset_gradient_kernel() { unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; if (iGlobal < dc_agents * NUM_WGTS) dc_ag.W[iGlobal] = 0.0f; } // take the number of wins for each agent and convert to a fitness value and store in results // thread number if the number for the opponent // wins has the number of wins for each agent with stride against each other agent (dc_agents x dc_agents) // results and array of size dc_agents where the results for this test go with stride 1 //__global__ void update_fitness_kernel1(float *wins, float *results) //{ // unsigned idx = threadIdx.x; // // //} /* Run a competition between agents, storing wins - losses in the d_wins array on the device. threads per block = the number of competitions between each agent pair = TEST_REPS blocks are in a square grid with the number of agents per group on each side Agents compete for a maximum of TEST_MAX steps or until one of them reaches finish line. 
    The score is stored in the results array in the row for agent a1, column for agent a2.
*/
__global__ void test_kernel3(float *d_wins)
{
    unsigned ag1 = blockIdx.x;
    unsigned ag2 = blockIdx.y;
    // an agent never competes against itself; the whole block exits together,
    // so no thread is left behind at the later __syncthreads()
    if (ag1 == ag2){ d_wins[ag1*dc_agents + ag2] = 0; return;}    // the entire block may exit here
    unsigned idx = threadIdx.x;

    // seeds, state, and # of wins must be kept separate for each competition between the two agents
    __shared__ unsigned s_seeds[4*TEST3_BLOCK_SIZE];
    __shared__ float s_s1[2*TEST3_BLOCK_SIZE];
    __shared__ float s_s2[2*TEST3_BLOCK_SIZE];
    __shared__ float s_wins[TEST3_BLOCK_SIZE];

    // only one copy of thetas is needed for each agent, since it is constant
    __shared__ float s_theta1[NUM_WGTS];
    __shared__ float s_theta2[NUM_WGTS];

    // copy seeds from ag1 to seeds[0] and [2] and from ag2 to seeds[1] and seeds[3]
    // adding in the idx value so each competition has different seeds
    // s_wins will have +1 for ag1 wins and -1 for ag2 wins and 0 for ties
    s_seeds[idx] = dc_ag.seeds[ag1] + idx;
    s_seeds[idx + TEST3_BLOCK_SIZE] = dc_ag.seeds[ag2 + dc_agents] + idx;
    s_seeds[idx + 2*TEST3_BLOCK_SIZE] = dc_ag.seeds[ag1 + 2*dc_agents] + idx;
    s_seeds[idx + 3*TEST3_BLOCK_SIZE] = dc_ag.seeds[ag2 + 3*dc_agents] + idx;
    s_wins[idx] = 0.0f;    // this is the number of wins for ag1 minus wins for ag2

    // copy thetas for each agent to shared memory
    // This is a loop because the number of threads may be less than the block size
    for (int iOffset = 0; iOffset < NUM_WGTS; iOffset += blockDim.x) {
        if (idx + iOffset < NUM_WGTS){
            s_theta1[idx + iOffset] = dc_ag.theta[ag1 + (idx + iOffset) * dc_agents];
            s_theta2[idx + iOffset] = dc_ag.theta[ag2 + (idx + iOffset) * dc_agents];
        }
    };

    __syncthreads();

    // randomize the state for ag1 and copy the same state for ag2
    randomize_state(s_s1 + idx, s_seeds + idx, TEST3_BLOCK_SIZE);
    s_s2[idx] = s_s1[idx];
    s_s2[idx + TEST3_BLOCK_SIZE] = s_s1[idx + TEST3_BLOCK_SIZE];

    unsigned action1, action2;
    if (idx < dc_test_reps) {
        int done1 = 0;
        int done2 = 0;
        int t;
        // race both agents greedily from the identical starting state;
        // doneN records the (1-based) finishing step for agent N
        for (t = 0; t < dc_test_max; t++) {
            if (!done1) {
                best_action3(s_s1 + idx, &action1, s_theta1, NUM_HIDDEN, NULL, TEST3_BLOCK_SIZE);
//                best_action2(s_s1 + idx, &action1, dc_ag.theta + ag1, dc_agents, NUM_HIDDEN, NULL);
                take_action(s_s1 + idx, action1, s_s1 + idx, TEST3_BLOCK_SIZE, dc_accel);
                if (terminal_state(s_s1 + idx)) {
                    done1 = t+1;
                }
            }
            if (!done2) {
                best_action3(s_s2 + idx, &action2, s_theta2, NUM_HIDDEN, NULL, TEST3_BLOCK_SIZE);
//                best_action2(s_s2 + idx, &action2, dc_ag.theta + ag2, dc_agents, dc_p.hidden_nodes, NULL);
                take_action(s_s2 + idx, action2, s_s2 + idx, TEST3_BLOCK_SIZE, dc_accel);
                if (terminal_state(s_s2 + idx)) done2 = 1 + t;
            }
            if (done1 || done2) break;    // stop when either agent is done
        }
        // unfinished agents are scored as slower than any finisher
        if (!done1) done1 = t + 2;
        if (!done2) done2 = t + 2;
        if (done1 < done2) s_wins[idx] += 1.0f;
        if (done1 > done2) s_wins[idx] += -1.0f;
    }
    __syncthreads();

    // do a reduction on the results (tree reduction over s_wins; all threads
    // stay in the loop so the __syncthreads() inside is reached uniformly)
    unsigned half = TEST3_BLOCK_SIZE / 2;
    while (half > 0) {
        if (idx < half && idx + half < dc_test_reps) {
            s_wins[idx] += s_wins[idx + half];
        }
        half /= 2;
        __syncthreads();
    }

    // copy the wins to global memory
    if (idx == 0) {
        d_wins[ag1 * dc_agents + ag2] = s_wins[0] / dc_test_reps;
    }
}

// One thread per agent: run `steps` learning time steps entirely in shared memory,
// then write the agent's state, seeds, weights, and traces back to global memory.
__global__ void learn_kernel(unsigned steps)
{
    unsigned idx = threadIdx.x;
    unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x;
    if (iGlobal >= dc_agents) return;

    // one pooled seed block and one pooled float block, carved up by pointer offsets
    __shared__ unsigned s_seeds[5*LEARN_BLOCK_SIZE];
    __shared__ float s_s[34*LEARN_BLOCK_SIZE];
    unsigned *s_action = s_seeds + 4*LEARN_BLOCK_SIZE;
    float *s_alpha = s_s + 2*LEARN_BLOCK_SIZE;
    float *s_theta = s_s + 3*LEARN_BLOCK_SIZE;
    float *s_W = s_s + 18*LEARN_BLOCK_SIZE;
    float *s_activation = s_s + 33*LEARN_BLOCK_SIZE;

//    __shared__ unsigned s_seeds[4*LEARN_BLOCK_SIZE];
//    __shared__ unsigned s_action[LEARN_BLOCK_SIZE];
//    __shared__ float s_s[2*LEARN_BLOCK_SIZE];
//    __shared__ float s_alpha[LEARN_BLOCK_SIZE];
//    __shared__ float s_theta[15*LEARN_BLOCK_SIZE];
//    __shared__ float s_W[15*LEARN_BLOCK_SIZE];
//    __shared__ float
//    s_activation[LEARN_BLOCK_SIZE];

    // copy state, action, and seeds to shared memory
    s_s[idx] = dc_ag.s[iGlobal];
    s_s[idx + LEARN_BLOCK_SIZE] = dc_ag.s[iGlobal + dc_agents];
    s_action[idx] = dc_ag.action[iGlobal];

//    unsigned iG = iGlobal;
//    unsigned ii = idx;
    unsigned *sSeeds = s_seeds + idx;
    unsigned *gSeeds = dc_ag.seeds + iGlobal;
//    s_seeds[idx] = dc_ag.seeds[iGlobal];
//    s_seeds[idx + LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + dc_agents];
//    s_seeds[idx + 2*LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + 2*dc_agents];
//    s_seeds[idx + 3*LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + 3*dc_agents];
//    s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    s_seeds[ii] = dc_ag.seeds[iG];
    // copy the 4 per-agent seeds (strided by dc_agents in global memory)
    sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents;
    sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents;
    sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents;
    sSeeds[0] = gSeeds[0];

    s_alpha[idx] = dc_ag.alpha[iGlobal];//    * dc_ag.alphaOn[iGlobal];

    // copy weights and gradients from global memory to shared memory
    for (int i = 0, j=0; i < NUM_WGTS*LEARN_BLOCK_SIZE; i +=LEARN_BLOCK_SIZE, j += dc_agents) {
        s_theta[idx + i] = dc_ag.theta[iGlobal + j];
        s_W[idx + i] = dc_ag.W[iGlobal + j];
    }

    s_activation[idx] = dc_ag.activation[iGlobal];

    // main learning loop: one Sarsa-style update per time step, restarting the
    // episode from a random state every dc_restart_interval steps
    unsigned restart_counter = 0;
    for (int t = 0; t < steps; t++, restart_counter--) {
        // reset state at restart intervals
        if (0 == restart_counter) {
            randomize_state(s_s + idx, s_seeds + idx, LEARN_BLOCK_SIZE);
            choose_action2(s_s + idx, s_action + idx, s_theta + idx, s_activation + idx, s_seeds + idx);
            calc_Q2(s_s + idx, s_action[idx], s_theta + idx, LEARN_BLOCK_SIZE, NUM_HIDDEN, s_activation + idx);
            reset_gradient(s_W + idx, LEARN_BLOCK_SIZE, NUM_WGTS);
            restart_counter = dc_restart_interval;
        }
        float Q_curr = calc_Q2(s_s + idx, s_action[idx], s_theta + idx, LEARN_BLOCK_SIZE, NUM_HIDDEN, s_activation + idx);
        accumulate_gradient2(s_s + idx, s_action[idx], s_theta + idx, s_activation + idx, s_W + idx);
        float reward = take_action(s_s + idx, s_action[idx], s_s + idx, LEARN_BLOCK_SIZE, dc_accel);
        unsigned success = terminal_state(s_s + idx);
        if (success) randomize_state(s_s + idx, s_seeds + idx, LEARN_BLOCK_SIZE);
        float Q_next = choose_action2(s_s + idx, s_action + idx, s_theta + idx, s_activation + idx, s_seeds + idx);
        // TD error drives the weight update through the accumulated traces
        float error = reward + dc_gamma * Q_next - Q_curr;
        update_thetas2(s_theta + idx, s_W + idx, s_alpha[idx], error, s_activation + idx);
        if (success) reset_gradient(s_W + idx, LEARN_BLOCK_SIZE, NUM_WGTS);
    }

    // copy state, action and seeds back to global memory
    dc_ag.action[iGlobal] = s_action[idx];
//    iG = iGlobal;
//    ii = idx;
//    dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE;
//    dc_ag.seeds[iG] = s_seeds[ii];
    sSeeds = s_seeds + idx;
    gSeeds = dc_ag.seeds + iGlobal;
    gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE;
    gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE;
    gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE;
    gSeeds[0] = sSeeds[0];
    dc_ag.s[iGlobal] = s_s[idx];
    dc_ag.s[iGlobal + dc_agents] = s_s[idx + LEARN_BLOCK_SIZE];

    // copy weights and gradients back from shared memory to global memory
    for (int i = 0, j=0; i < NUM_WGTS*LEARN_BLOCK_SIZE; i +=LEARN_BLOCK_SIZE, j += dc_agents) {
        dc_ag.theta[iGlobal + j] = s_theta[idx + i];
        dc_ag.W[iGlobal + j] = s_W[idx + i];
    }
    dc_ag.activation[iGlobal] = s_activation[idx];
}

// total x dimension is the agent number
__global__ void share_best_kernel(float *d_agent_scores, float threshold, unsigned iBest, unsigned higherIsBetter, float share_pct)
{
    unsigned idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= dc_agents) return;

//    // if
//    this is the best agent, set it's alpha to 0.0f to preserve
//    // otherwise reset the alpha
//    if (idx == iBest) dc_ag.alphaOn[idx] = 0;
//    else dc_ag.alphaOn[idx] = 1;

    // the best agent stops learning (alpha = 0); all other agents use the base rate
    if (idx == iBest) dc_ag.alpha[idx] = 0.0f;
    else dc_ag.alpha[idx] = dc_alpha;

    // do nothing if agent has a better score than the threshold
//    if (d_agent_scores[idx] >= 0.0f) return;
    if (higherIsBetter && (d_agent_scores[idx] >= threshold)) return;
    if (!higherIsBetter && (d_agent_scores[idx] <= threshold)) return;

    // with a probability share_best_pct, copy best agents weights to this agent
    float r = RandUniform(dc_ag.seeds+idx, dc_agents);
    if (r < share_pct) {
        for (int i = 0; i < NUM_WGTS; i++) {
            dc_ag.theta[idx + i * dc_agents] = dc_ag.theta[iBest + i * dc_agents];
        }
        // cloned agents get a boosted learning rate so they diverge from the copy
        dc_ag.alpha[idx] = dc_alpha * dc_copy_alpha_multiplier;
    }
}

/*
    x-dimension represents all the possible starting states
    number of threads is CALC_QUALITY_BLOCK_SIZE (which must be greater than NUM_WGTS)
    iBest is the agent to be tested
    maxSteps is the maximum number of time steps before giving up
    d_steps is where the results are stored for each of the possible starting states
*/
__global__ void calc_quality_kernel(unsigned iBest, unsigned maxSteps, float *d_steps)
{
    unsigned idx = threadIdx.x;
    unsigned iGlobal = idx + blockIdx.x * blockDim.x;
    __shared__ float s_theta[NUM_WGTS];
    __shared__ float s_s[2 * CALC_QUALITY_BLOCK_SIZE];

    // set up values in shared memory...
    // ... agent weights
//    unsigned v_div = iGlobal / NUM_X_DIV;
    // map the flat thread index onto the (x, velocity) grid of starting states,
    // sampling each cell at its center (the +0.5 offset)
    float x_div_num = 0.5f + (float)(iGlobal % NUM_X_DIV);
    float vel_div_num = 0.5f + (float)(iGlobal / NUM_X_DIV);
//    float x_div_num = 0.25 + 0.5f*RandUniform(dc_ag.seeds+iBest, dc_agents) + (float)(iGlobal % NUM_X_DIV);
//    float y_div_num = 0.25 + 0.5f*RandUniform(dc_ag.seeds+iBest, dc_agents) + (float)(iGlobal / NUM_X_DIV);
    if (idx < NUM_WGTS) s_theta[idx] = dc_ag.theta[iBest + idx * dc_agents];
    // ...
//    state based on thread and block indexes **TODO this can be modified to have larger blocks of threads
    if (idx < CALC_QUALITY_BLOCK_SIZE) {
        s_s[idx] = MIN_X + x_div_num * DIV_X;
        s_s[idx + CALC_QUALITY_BLOCK_SIZE] = MIN_VEL + vel_div_num * DIV_VEL;
    }
    __syncthreads();

    // greedy rollout: count time steps until the terminal state (or give up at maxSteps)
    unsigned t;
    unsigned action;
    for (t = 0; t < maxSteps; t++) {
        best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CALC_QUALITY_BLOCK_SIZE);
        take_action(s_s+idx, action, s_s+idx, CALC_QUALITY_BLOCK_SIZE, dc_accel);
        if (terminal_state(s_s+idx)) break;
    }
    // store the (1-based) step count for this starting state
    d_steps[iGlobal] = (float)(1+t);
}

/*
    similar to calc_quality_kernel, but does the calculations for all the agents
    the x-dimension represents all the possible starting states
    number of threads is CALC_QUALITY_BLOCK_SIZE (must be greater than NUM_WGTS)
    the agent number is in blockIdx.y
*/
__global__ void calc_all_quality_kernel(unsigned maxSteps, float *d_steps)
{
    unsigned idx = threadIdx.x;
    unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned ag = blockIdx.y;
    __shared__ float s_theta[NUM_WGTS];
//    __shared__ float s_s[2*CRUDE_NUM_TOT_DIV];
    __shared__ float s_s[2*CALC_QUALITY_BLOCK_SIZE];

    // setup values in shared memory...
    // ... first agent weights
    // map the flat thread index onto the crude (x, velocity) starting-state grid
    float x_div_num = 0.5f + (float)(iGlobal % CRUDE_NUM_X_DIV);
    float vel_div_num = 0.5f + (float)(iGlobal / CRUDE_NUM_X_DIV);
    if (idx < NUM_WGTS) s_theta[idx] = dc_ag.theta[ag + idx * dc_agents];
    // ...
//    then the state based on the x-dimension
//    if (idx < CRUDE_NUM_TOT_DIV) {
    if (idx < CALC_QUALITY_BLOCK_SIZE) {
        s_s[idx] = MIN_X + x_div_num * CRUDE_DIV_X;
//        s_s[idx + CRUDE_NUM_TOT_DIV] = MIN_VEL + vel_div_num * CRUDE_DIV_VEL;
        s_s[idx + CALC_QUALITY_BLOCK_SIZE] = MIN_VEL + vel_div_num * CRUDE_DIV_VEL;
    }
    __syncthreads();

    // threads beyond the crude starting-state grid have nothing to evaluate
    // (the return is after the barrier, so the barrier is reached uniformly)
    if (iGlobal >= CRUDE_NUM_TOT_DIV) return;

    // greedy rollout for this agent from this starting state
    unsigned t;
    unsigned action;
    for (t = 0; t < maxSteps; t++) {
//        best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CRUDE_NUM_TOT_DIV);
//        take_action(s_s+idx, action, s_s+idx, CRUDE_NUM_TOT_DIV, dc_accel);
        best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CALC_QUALITY_BLOCK_SIZE);
        take_action(s_s+idx, action, s_s+idx, CALC_QUALITY_BLOCK_SIZE, dc_accel);
        if (terminal_state(s_s+idx)) break;
    }
    d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = (float)(1+t);
//    d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = s_s[idx + CRUDE_NUM_TOT_DIV];
//    d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = s_s[idx];
}

// raw fitness value is the total steps summed over the test starting positions
// (d_steps must already be row-reduced so the sum sits at the start of each agent's row)
__global__ void copy_fitness_to_agent_kernel(float *d_steps)
{
    unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x;
    if (iGlobal >= dc_agents) return;
    dc_ag.fitness[iGlobal] = d_steps[iGlobal * CRUDE_NUM_TOT_DIV];
}

/*
    calculate the average quality of an agent by running it for specific starting positions
    spanning the state space
    The value returned is the sum of the steps for
*/
float calc_agent_quality(AGENT_DATA *agGPU, unsigned iBest, float *d_steps, unsigned max_steps)
{
//    // calculate the number of values for x and velocity
//    unsigned num_x = 1.5f + (MAX_X - MIN_X) / DIV_X;
//    unsigned num_vel = 1.5f + (MAX_VEL - MIN_VEL) / DIV_VEL;
//    unsigned num_tot = num_x * num_vel;

#ifdef VERBOSE
    printf("calc_agent_quality for best agent #%d\n using %d x values, %d veloicty values, total of %d values\n", iBest, NUM_X_DIV, NUM_VEL_DIV, NUM_TOT_DIV);
#endif

    // *** TODO increase block dimension to at least 32, calculate the x and velocity
//    values in the kernel,
//    instead of just using the thread and block indexes
//    dim3 blockDim(NUM_X_DIV);
//    dim3 gridDim(NUM_VEL_DIV);
    dim3 blockDim(CALC_QUALITY_BLOCK_SIZE);
    dim3 gridDim(1 + (NUM_TOT_DIV-1)/CALC_QUALITY_BLOCK_SIZE);

    // allocate a location to store the number of steps for every trial
//    float *d_steps = device_allocf(num_tot);

//    printf("launching calc_quality_kernel with blocks of (%d x %d) and grid of (%d x %d)\n", blockDim.x, blockDim.y, gridDim.x, gridDim.y);
    PRE_KERNEL2("calc_quality_kernel", blockDim, gridDim);
    hipLaunchKernelGGL((calc_quality_kernel), dim3(gridDim), dim3(blockDim), 0, 0, iBest, max_steps, d_steps);
    POST_KERNEL("calc_quality_kernel");

#ifdef VERBOSE
    device_dumpf("steps for each x, velocity value", d_steps, NUM_VEL_DIV, NUM_X_DIV);
#endif

    // sum the step counts over all starting states; the total lands in d_steps[0]
    row_reduce(d_steps, NUM_TOT_DIV, 1);
    float quality;
    CUDA_SAFE_CALL(hipMemcpy(&quality, d_steps, sizeof(float), hipMemcpyDeviceToHost));
//    CUDA_SAFE_CALL(hipMemcpy(agGPU->fitness + iBest, d_steps, sizeof(float), hipMemcpyDeviceToDevice));
#ifdef VERBOSE
    printf("[calc_agent_quality] quality of %d is %7.2f\n", iBest, quality / NUM_TOT_DIV);
#endif
    return quality;
}

// lazily-allocated scratch buffers reused across calls to calc_all_agents_quality
static float *d_bestVal = NULL;
static unsigned *d_iBest = NULL;
static unsigned *h_iBest = NULL;

// Print the crude starting-state grid dimensions used for the fast quality estimate.
void describe_crude_divs()
{
    printf("There are %d divs for X of size %6.3f\nThere are %d divs for VEL of size %6.3f\n Total test points equals %d\n", CRUDE_NUM_X_DIV, CRUDE_DIV_X, CRUDE_NUM_VEL_DIV, CRUDE_DIV_VEL, CRUDE_NUM_TOT_DIV);
}

// calc the quality value for all agents
unsigned calc_all_agents_quality(unsigned t, AGENT_DATA *agGPU, float *d_steps)
{
    unsigned best_size = 1 + (_p.agents - 1)/(2*LEARN_BLOCK_SIZE);
    if (NULL == d_bestVal) d_bestVal = (float *)device_allocf(best_size);
    if (NULL == d_iBest) d_iBest = (unsigned *)device_allocui(best_size);
    if (NULL == h_iBest) h_iBest = (unsigned *)malloc(best_size * sizeof(unsigned));

    // keep track of the best agent and its precise quality with static variables
    static int iOldBest = -1;
    static float oldBestQuality = BIG_FLOAT;

    // sum each agent's step counts over all crude starting states -> raw fitness per agent
    row_reduce(d_steps, CRUDE_NUM_TOT_DIV, _p.agents);
#ifdef VERBOSE
    device_dumpf("d_steps, after row reduce", d_steps, _p.agents, CRUDE_NUM_TOT_DIV);
#endif

    // increase BLOCK_SIZE for this kernel (?)
    // ??? why copy all fitness values back to the agent data?  It's only needed there if we are going to be sharing
    // Instead, just do a col_armin to get the best fitness and it's agent number ???

    // have to copy the raw fitness value back to the agent data structure since
    // it will be used to determine the worst agents that can be over-written with copy of best one

    // use maximum blocksize
    dim3 blockDim(_p.agents);
    if (blockDim.x > 512) blockDim.x = 512;
    dim3 gridDim(1 + (_p.agents - 1) / blockDim.x);
    PRE_KERNEL("copy_fitness_to_agent_kernel");
    hipLaunchKernelGGL((copy_fitness_to_agent_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_steps);
    POST_KERNEL("copy_fitness_to_agent_kernel");

#ifdef VERBOSE
    dump_agentsGPU("after copy_fitness_to_agent", agGPU, 1);
#endif

    // determine the best fitness value
    row_argmin2(agGPU->fitness, _p.agents, 1, d_bestVal, d_iBest);

    // see if the best agent is a new one
//    unsigned iBest;
    unsigned newBestFlag = 0;
//    printf("copying %d unsigned values from %p on device to %p on host\n", best_size, d_iBest, h_iBest);
    CUDA_SAFE_CALL(hipMemcpy(h_iBest, d_iBest, best_size * sizeof(unsigned), hipMemcpyDeviceToHost));
#ifdef VERBOSE
    printf("agent %d has the best fitness\n", h_iBest[0]);
#endif
    if (h_iBest[0] != iOldBest) {
        // we have a possible new best agent!
        // calc the accurate fitness for iBest
        float iBestQuality = calc_agent_quality(agGPU, h_iBest[0], d_steps, FINAL_QUALITY_MAX_STEPS);
        if (iBestQuality < oldBestQuality) {
            // we really do have a new best agent
#ifdef VERBOSE
            printf("new best with quality of %9.1f, old quality was %9.1f\n", iBestQuality, oldBestQuality);
#endif
            newBestFlag = 1;
            iOldBest = h_iBest[0];
            oldBestQuality = iBestQuality;
            if (_p.dump_all_winners) dump_one_agentGPU("new best agent", agGPU, h_iBest[0], 0);
            add_to_GPU_result_list(agGPU, h_iBest[0], t, iBestQuality);
        }else {
#ifdef VERBOSE
            printf("current best agent, %d, is still the best!\n", iOldBest);
#endif
            // the reigning best agent is still the best, but it's fitness has been over-written
            // in agGPU.  This is not a problem except when the agGPU fitness is printed it needs
            // to be multiplied by NUM_TOT_DIV / CRUDE_NUM_TOT_DIV
        }
    }

    if (newBestFlag || (_p.share_always_pct > 0.0f)) {
#ifdef VERBOSE
        printf("--> going to share the best agent...\n");
#endif
        // going to share the best agent
        // need to create an agent score that is negative for agents that might be cloned from the best
        float avg_fitness = clean_reduce(agGPU->fitness, _p.agents) / _p.agents;
#ifdef VERBOSE
        printf("average fitness is %f\n", avg_fitness / CRUDE_NUM_TOT_DIV);
#endif
        blockDim.x = SHARE_BEST_BLOCK_SIZE;
        gridDim.x = 1 + (_p.agents - 1) / blockDim.x;
        PRE_KERNEL("share_best_kernel");
#ifdef VERBOSE
        printf("avg_fitness is %f and iBest is %d\n", avg_fitness, h_iBest[0]);
        device_dumpf("fitness values", agGPU->fitness, 1, _p.agents);
#endif
//        hipLaunchKernelGGL((share_best_kernel), dim3(gridDim), dim3(blockDim), 0, 0, agGPU->fitness, avg_fitness, h_iBest[0], 0, newBestFlag ? _p.share_best_pct : _p.share_always_pct);
        hipLaunchKernelGGL((share_best_kernel), dim3(gridDim), dim3(blockDim), 0, 0, agGPU->fitness, avg_fitness, iOldBest, 0, newBestFlag ? _p.share_best_pct : _p.share_always_pct);
        POST_KERNEL("share_best_kernel");
#ifdef VERBOSE
        dump_agentsGPU("after share_best_kernel", agGPU, 1);
#endif
    }
//    deviceFree(d_bestVal);
//    deviceFree(d_iBest);
    return h_iBest[0];
}

/*
    determine the new best agent based on the new winner, with a possible fitness comparison
    returns 1 if the best agent is new and always sets the value of pBest to the reigning best agent
*/
unsigned determine_new_best(AGENT_DATA *agGPU, unsigned *d_iWinner, unsigned *pBest, float * pBestFitness, float *d_steps)
{
    static int iBest = -1;                    // will hold the current best agent
    static float iBestQuality = BIG_FLOAT;    // has the fitness value of current best agent
    unsigned iWinner;
    CUDA_SAFE_CALL(hipMemcpy(&iWinner, d_iWinner, sizeof(unsigned), hipMemcpyDeviceToHost));
    if (iWinner == iBest){
#ifdef VERBOSE
        printf("best agent, %d, won the competition, nothing new here\n", iWinner);
#endif
        *pBest = iBest;    // nothing new here
    }else{
#ifdef VERBOSE
        printf("%d won the competition!!!\n", iWinner);
#endif
        if (_p.dump_all_winners) dump_one_agentGPU("competition winner", agGPU, iWinner, 0);
        // The competition winner is different than the current best agent.
        if (_p.share_fitness) {
            // check fitness of winner and compare to fitness of best agent
            float winnerQuality = calc_agent_quality(agGPU, iWinner, d_steps, FINAL_QUALITY_MAX_STEPS);
#ifdef VERBOSE
            printf("quality of %d is %f\n", iWinner, winnerQuality);
#endif
            if (winnerQuality >= iBestQuality){
#ifdef VERBOSE
                printf("%d is not good enough to become the new best\n", iWinner);
#endif
                *pBest = iBest;    // no change because winner has worse quality than current best
            }else {
#ifdef VERBOSE
                printf("%d is the new best!!! (replacing %d)\n", iWinner, iBest);
#endif
                if (_p.dump_all_new_best) dump_one_agentGPU("new best agent", agGPU, iWinner, 0);
                *pBest = iWinner;    // the winner is better than the current best!!
*pBestFitness = winnerQuality; iBestQuality = winnerQuality; // save the information } }else { // calc quality for information purposes if (iWinner != iBest){ calc_agent_quality(agGPU, iWinner, d_steps, FINAL_QUALITY_MAX_STEPS); if (_p.dump_all_new_best) dump_one_agentGPU("new best agent", agGPU, iWinner, 0); } // no fitness check, the winner automatically becomes the best *pBest = iWinner; } } unsigned newBestFlag = (iBest != *pBest); iBest = *pBest; // remember the best agent for next time return newBestFlag; } /* Reduce the results of the competition to determine the winner and record the information. If the winner is not the current best agent then... if _p.share_fitness is false, the competition winner becomes the best agent if _p.share_fitness is true, calculate fitness of winner and if better than current best agent, the winner becomes the new best agent. If the best agent is different, or the _p.always_share flag is set, then only copy best agent over the losers, using probability _p.share_best_pct d_wins is an (_p.agents x _p.agents) array on the device with the results of the round-robin d_agent_scores will be filled in with the net score for each agent d_steps is a temporary working area on device for use by calc_quality Strategy: All agents with a non-negative row score will be preserved agents with row score < zero will be copied from the best agent with probability _p.share_best_pct */ void share_after_competition(unsigned t, AGENT_DATA *agGPU, unsigned *pBest, float *d_wins, float *d_agent_scores, float *d_steps) { // Determine who won the competition #ifdef VERBOSE printf("sharing after competition... 
\n"); device_dumpf("agent scores from competition", d_wins, _p.agents, _p.agents); #endif // first accumulate the column totals of d_wins times -1 and store it in d_agent_scores col_reduce_x_k(d_wins, d_agent_scores, _p.agents, _p.agents, -1.0f); // next, calculate the row totals, keeping the total in column 0 of d_wins row_reduce(d_wins, _p.agents, _p.agents); // add row totals to the column totals in d_agent_scores vsum(d_agent_scores, d_wins, _p.agents, _p.agents); #ifdef VERBOSE device_dumpf("agent total score", d_agent_scores, 1, _p.agents); #endif float *d_winnerVal; unsigned *d_iWinner; row_argmax(d_agent_scores, _p.agents, 1, &d_winnerVal, &d_iWinner); // d_iWinner now contains the agent that won the competition // Determine if there is a new best agent, and record the best agent (whoever it is) in *pBest float newBestFitness; unsigned newBestFlag = determine_new_best(agGPU, d_iWinner, pBest, &newBestFitness, d_steps); if (newBestFlag) add_to_GPU_result_list(agGPU, *pBest, t, newBestFitness); // if there is a new best agent, or if SHARE_ALWAYS is on, then share the if (newBestFlag || _p.share_always_pct > 0.0f) { printf("%d is the new best agent\n", *pBest); dim3 blockDim(SHARE_BEST_BLOCK_SIZE); dim3 gridDim(1 + (_p.agents-1)/SHARE_BEST_BLOCK_SIZE); PRE_KERNEL2("share_best_kernel", blockDim, gridDim); hipLaunchKernelGGL(( share_best_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_agent_scores, 0.0f, *pBest, 1, newBestFlag ? 
_p.share_best_pct : _p.share_always_pct); POST_KERNEL("share_best_kernel"); #ifdef VERBOSE dump_agentsGPU("after sharing", agGPU, 1); #endif } deviceFree(d_winnerVal); deviceFree(d_iWinner); } unsigned iBest; // will hold the best agent value // run on the GPU, storing results in the RESULTS array provided void run_GPU(AGENT_DATA *agGPU) { // prepare the place to store results of run prepare_GPU_result_list(_p.num_tests / 2, _p.dump_updates); // on entry, device pointers are stored in dc_ag for agent data, and // parameters are stored in dc_p // dump_agentsGPU("run_GPU entry", agGPU); // allocate memory on device to hold results float *d_results = device_allocf(_p.agents * _p.num_tests); // allocate a temporary area on device to hold the steps array for the quality calculation // if doing competition, then only need room for one agent with NUM_TOT_DIV values // if doing competion, must be able to hold the greater of NUM_TOT_DIV and // _p.agents * CRUDE_NUM_TOT_DIV unsigned steps_size = NUM_TOT_DIV; if (!_p.share_compete && (steps_size < (CRUDE_NUM_TOT_DIV * _p.agents))) steps_size = CRUDE_NUM_TOT_DIV * _p.agents; printf("NUM_TOT_DIV is %d, CRUDE_NUM_TOT_DIV is %d, num agents is %d, so size of d_steps is %d\n", NUM_TOT_DIV, CRUDE_NUM_TOT_DIV, _p.agents, steps_size); float *d_steps = device_allocf(steps_size); // allocate memory on device to hold temporary wins and temporary agent scores float *d_wins = device_allocf(_p.agents * _p.agents); float *d_agent_scores = device_allocf(_p.agents); // calculate block and grid sizes for kernels // learning kernel used the value in LEARN_BLOCK_SIZE. // The entire x dimension is the agent number. // The y dimension is not used. dim3 learnBlockDim(LEARN_BLOCK_SIZE); dim3 learnGridDim(1 + (_p.agents-1) / LEARN_BLOCK_SIZE); // test3 runs the competition between agents to determine the best agent. // The thread x value is the test number. The number of test repititions must be // less than or equal ot the TEST3_MAX_BLOCK_SIZE value. 
// The block x and y values are the agent numbers for the two competing agents. dim3 test3BlockDim(_p.test_reps); if (_p.agents > 65535) printf("***** too many agents for round-robin competition *****"); dim3 test3GridDim(_p.agents, _p.agents); // reset gradient kernel has total number of threads equal to the gradient values dim3 resetGradientBlockDim(512); dim3 resetGradientGridDim(1 + (_p.agents * NUM_WGTS - 1) / 512); if (resetGradientGridDim.x > 65535) { resetGradientGridDim.y = 1 + (resetGradientGridDim.x - 1) / 65535; resetGradientGridDim.x = 1 + (resetGradientGridDim.x - 1) / resetGradientGridDim.y; } // set up timing values CUDA_EVENT_PREPARE; float timeLearn = 0.0f; // learning kernel float timeTest = 0.0f; // competition float timeShare = 0.0f; // all the work for sharing results (except the competition) float timeFitCalc = 0.0f; // testLogTime will hold the time recorded at the start of each test and at the end of the run float *testLogTime = (float *)malloc(_p.num_tests*sizeof(float)); unsigned timerGPU; CREATE_TIMER(&timerGPU); START_TIMER(timerGPU); timing_feedback_header(_p.num_tests); for (int i = 0; i < _p.num_tests; i++) { timing_feedback_dot(i); #ifdef VERBOSE printf("\n**************************** main loop %d ************************\n", i); #endif // do some learning CUDA_EVENT_START; PRE_KERNEL2("learn_kernel", learnBlockDim, learnGridDim); hipLaunchKernelGGL(( learn_kernel), dim3(learnGridDim), dim3(learnBlockDim), 0, 0, _p.test_interval); POST_KERNEL("learn_kernel"); CUDA_EVENT_STOP(timeLearn); // dump_agentsGPU("after learning session", agGPU); // run tests and sharing // if (0 == ((i+1) % _p.chunks_per_test)) { if (_p.share_compete) { // printf("running competition..."); CUDA_EVENT_START; PRE_KERNEL2("test_kernel3", test3BlockDim, test3GridDim); hipLaunchKernelGGL(( test_kernel3), dim3(test3GridDim), dim3(test3BlockDim), 0, 0, d_wins); POST_KERNEL("test_kernel3"); CUDA_EVENT_STOP(timeTest); // dump_agentsGPU("after testing, before 
sharing", agGPU); CUDA_EVENT_START share_after_competition((i+1) * _p.test_interval, agGPU, &iBest, d_wins, d_agent_scores, d_steps); CUDA_EVENT_STOP(timeShare); // dump_agentsGPU("after sharing", agGPU); }else if (_p.share_fitness) { // total x coordinate is the index for the point in state space, // the block's y dimension is the agent number dim3 fitCalcBlockDim(CALC_QUALITY_BLOCK_SIZE); dim3 fitCalcGridDim(1 + (CRUDE_NUM_TOT_DIV-1)/CALC_QUALITY_BLOCK_SIZE, _p.agents); // dump_agentsGPU("prior to calc_all_quality_kernel", agGPU, 1); CUDA_EVENT_START PRE_KERNEL2("calc_all_quality_kernel", fitCalcBlockDim, fitCalcGridDim); hipLaunchKernelGGL(( calc_all_quality_kernel), dim3(fitCalcGridDim), dim3(fitCalcBlockDim), 0, 0, MAX_STEPS_FOR_QUALITY, d_steps); POST_KERNEL("calc_all_quality_kernel"); CUDA_EVENT_STOP(timeFitCalc); // describe_crude_divs(); // device_dumpf("d_steps", d_steps, _p.agents, CRUDE_NUM_TOT_DIV); CUDA_EVENT_START iBest = calc_all_agents_quality((i+1) * _p.test_interval, agGPU, d_steps); CUDA_EVENT_STOP(timeShare); // dump_agentsGPU("after sharing", agGPU); } } printf("\n"); CUDA_EVENT_CLEANUP; STOP_TIMER(timerGPU, "total GPU time"); PRINT_TIME(timeLearn, "learn time"); PRINT_TIME(timeTest, "test time"); PRINT_TIME(timeShare, "share time"); PRINT_TIME(timeFitCalc, "calc fitness time"); #ifdef DUMP_FINAL_AGENTS dump_agentsGPU("--------------------------------------\n Ending Agent States\n", agGPU, 1); #endif if (_p.dump_best){ dump_one_agentGPU("Best Agent on GPU:", agGPU, last_agent_on_GPU_result_list(), 1); printf("quality based on %d MAX_ITERATIONS and (%d x %d) start states is %8.3f\n", FINAL_QUALITY_MAX_STEPS, NUM_X_DIV, NUM_VEL_DIV, last_fitness_on_GPU_result_list()); } if (d_results) deviceFree(d_results); if (d_wins) deviceFree(d_wins); if (d_agent_scores) deviceFree(d_agent_scores); if (d_steps) deviceFree(d_steps); if (testLogTime) free(testLogTime); }
a8c26cc6ea86f06affd0fb694468a75f87136d8c.cu
// // mcar.cu // mcar // // Created by Dwight Bell on 11/20/10. // Copyright dbelll 2010. All rights reserved. // #include <cuda.h> #include "cutil.h" #include <math.h> #include <assert.h> #include "main.h" #include "mcar.h" #include "cuda_utils.h" #include "cuda_rand.cu" #include "misc_utils.h" #include "reduction.h" #include "gpu_results.h" // parameters stored in global structure for CPU static PARAMS _p; // Initial global seeds used to ensure identical random variables each run on all machines static unsigned g_multiseeds[16*4] = { 2784565659u, 1491908209u, 3415062841u, 3293636241u, \ 1714636915u, 1681692777u, 846930886u, 1804289383u, \ 1649760492u, 719885386u, 424238335u, 1957747793u, \ 1350490027u, 1025202362u, 1189641421u, 596516649u, \ 1967513926u, 2044897763u, 1102520059u, 783368690u, \ 1303455736u, 304089172u, 1540383426u, 1365180540u, \ 1726956429u, 294702567u, 521595368u, 35005211u, \ 233665123u, 278722862u, 861021530u, 336465782u, \ 1801979802u, 1101513929u, 468703135u, 2145174067u, \ 1125898167u, 1369133069u, 635723058u, 1315634022u, \ 1656478042u, 628175011u, 2089018456u, 1059961393u, \ 1914544919u, 859484421u, 1653377373u, 1131176229u, \ 1973594324u, 1734575198u, 756898537u, 608413784u, \ 184803526u, 1129566413u, 2038664370u, 149798315u, \ 749241873u, 1911759956u, 1424268980u, 412776091u, \ 1827336327u, 1937477084u, 2084420925u, 511702305u } ; static unsigned *g_seeds = g_multiseeds; static float accel[NUM_ACTIONS] = {-ACCEL_FACTOR, 0.0f, ACCEL_FACTOR}; void set_seed(unsigned seed){ g_seeds = g_multiseeds + seed*4; printf("seeds are %u %u %u %u\n", g_seeds[0], g_seeds[1], g_seeds[2], g_seeds[3]); } #pragma mark GPU constant memory __constant__ float dc_accel[NUM_ACTIONS]; //__constant__ PARAMS dc_p; __constant__ AGENT_DATA dc_ag; __constant__ unsigned dc_agents; __constant__ float dc_epsilon; __constant__ float dc_gamma; __constant__ float dc_lambda; __constant__ float dc_alpha; __constant__ unsigned dc_test_reps; __constant__ unsigned dc_test_max; 
__constant__ unsigned dc_restart_interval; //__constant__ float dc_share_best_pct; __constant__ float dc_copy_alpha_multiplier; // fixed pointers are stored in constant memory on the device //__constant__ unsigned *dc_seeds; //__constant__ float *dc_theta; //__constant__ float *dc_W; //__constant__ float *dc_s; //__constant__ unsigned *dc_action; //__constant__ float *fitness; const char * string_for_action(unsigned a) { return (a == 0) ? "LEFT" : ((a == 1) ? "NONE" : "RIGHT"); } #pragma mark - #pragma mark CPU & GPU DUAL_PREFIX float sigmoid(float in) { return 1.0f/(1.0f + expf(-in)); } DUAL_PREFIX unsigned iActionStart(unsigned a, unsigned stride, unsigned num_hidden) { unsigned i = (a * ((STATE_SIZE + 2) * num_hidden + 1)) * stride; return i; } //#define iActionStart(a, stride, num_hidden) (((a)*((STATE_SIZE+2)*NUM_HIDDEN + 1))*(stride)) // calculate the offset from the start of the weights for the bias weight for hidden node j DUAL_PREFIX unsigned offsetToHiddenBias(unsigned j, unsigned stride, unsigned num_hidden) { unsigned i = j*(1 + STATE_SIZE) * stride; return i; } // calculate the index for the bias weight for the output node DUAL_PREFIX unsigned offsetToOutputBias(unsigned stride, unsigned num_hidden) { unsigned i = num_hidden*(1 + STATE_SIZE) * stride; return i; } // Calculate the output of the neural net for specified state and action. // Hidden node activation values are stored in activation array and the output Q value is returned. 
DUAL_PREFIX float calc_Q(float *s, unsigned a, float *theta, unsigned stride, unsigned num_hidden, float *activation) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, stride, num_hidden); unsigned iOutputBias = offsetToOutputBias(stride, num_hidden); float result = 0.0f; // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, stride, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the contributions for the state input nodes for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k) * stride] * s[k * stride]; } activation[j * stride] = sigmoid(in); result += theta[iOutputBias + (1+j) * stride] * activation[j*stride]; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... ", s[0], s[stride], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } // different strides for state and theta // state has stride of LEARN_BLOCK_SIZE // theta stride is specified by argument DUAL_PREFIX float calc_Q2(float *s, unsigned a, float *theta, unsigned stride_theta, unsigned num_hidden, float *activation) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, stride_theta, num_hidden); float result = 0.0f; unsigned iOutputBias = offsetToOutputBias(stride_theta, num_hidden); // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, stride_theta, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the 
contributions for the state input nodes for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k) * stride_theta] * s[k * LEARN_BLOCK_SIZE]; } // apply sigmoid and accumulate in the result in = sigmoid(in); if (activation) activation[j*stride_theta] = in; result += theta[iOutputBias + (1+j) * stride_theta] * in; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... ", s[0], s[LEARN_BLOCK_SIZE], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } // add in the output bias contribution result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } // state array has stride of stride_s // theta array has stride of 1 DUAL_PREFIX float calc_Q3(float *s, unsigned a, float *theta, unsigned num_hidden, float *activation, unsigned stride_s) { // adjust theta to point to beginning of this action's weights theta += iActionStart(a, 1, num_hidden); float result = 0.0f; unsigned iOutputBias = offsetToOutputBias(1, num_hidden); // loop over each hidden node for (int j = 0; j < num_hidden; j++) { // iBias is the index into theta for the bias weight for the hidden node j unsigned iBias = offsetToHiddenBias(j, 1, num_hidden); // first calculate contribution of the bias for this hidden node float in = theta[iBias] * -1.0f; // next add in the contributions for the state input nodes #pragma unroll 2 for (int k = 0; k < STATE_SIZE; k++) { in += theta[iBias + (1+k)] * s[k*stride_s]; } // apply sigmoid and accumulate in the result in = sigmoid(in); if (activation) activation[j] = in; result += theta[iOutputBias + (1+j)] * in; #ifdef DEBUG_CALC_Q printf("calc_Q for state (%9.4f, %9.4f) and action %d ... 
", s[0], s[stride_s], a); // printf("input to hidden node %d is %9.4f and activation is %9.4f\n", j, in, activation[j*stride]); #endif } // add in the output bias contribution result += theta[iOutputBias] * -1.0f; #ifdef DEBUG_CALC_Q printf("output activation is %9.4f\n", result); #endif return result; } DUAL_PREFIX void reset_gradient(float *W, unsigned stride, unsigned num_wgts) { for (int i = 0; i < num_wgts; i++) { W[i * stride] = 0.0f; } } DUAL_PREFIX void accumulate_gradient(float *s, unsigned a, float *theta, unsigned stride, unsigned num_hidden, unsigned num_wgts, float *activation, float *W, float lambda, float gamma) { // First, decay all the existing gradients by lambda * gamma #ifdef DEBUG_GRADIENT_CALC printf("all gradients after decay:\n"); #endif for (int i = 0; i < num_wgts; i++) { W[i*stride] *= lambda * gamma; #ifdef DEBUG_GRADIENT_CALC printf(" %9.6f\n", W[i*stride]); #endif } // Next, need to add in the new gradient for the specified action. // adjust W & theta to point to this action's weights unsigned offset = iActionStart(a, stride, num_hidden); // printf("[accumulate_gradient] offset is %d for action %d\n", offset, a); theta += offset; W += offset; #ifdef DEBUG_GRADIENT_CALC printf("updating gradients for action %d\n", a); #endif // for gradients to output node, the gradient equals the activation of the hidden layer node (or bias) // first update the gradient for bias -> output unsigned iOutBias = offsetToOutputBias(stride, num_hidden); W[iOutBias] += -1.0f; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] iOutBias is %d\n", iOutBias); printf("output bias changed by %9.6f and is now %9.6f\n", -1.0f, W[iOutBias]); #endif // next update the gradients with respect to weights from hidden to output for (int j = 0; j < num_hidden; j++) { // printf("[accumulate_gradient] iOutBias is %d\n", iOutBias); W[iOutBias + (1+j)*stride] += activation[j * stride]; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] hidden node %d is at %d\n", j, 
iOutBias + (1+j)*stride); printf("hidden%d to output changed by %9.6f and is now %9.6f\n", j, activation[j*stride], W[iOutBias + (1+j)*stride]); #endif } // update the gradients with respect to the weights from input to hidden for (int j = 0; j < num_hidden; j++) { // first the bias weight unsigned iHidBias = offsetToHiddenBias(j, stride, num_hidden); // gradient of output i wrt wgt from input k to hidden j equals // grad(in_j wrt wgt_kj) * grad(activation_j wrt in_j) * grad(output activation wrt activation_j) = // activation_k * activation_j * (1-activation_j) * wgt_ji // The last two terms are only a function of j (and there is only one output node), so // calculate grad to be the last two terms float grad = activation[j*stride] * (1-activation[j*stride]) * theta[iOutBias + (1+j)*stride]; // total gradient is the activation of the input node times grad // The updated value includes eligibility trace of prior gradient W[iHidBias] += -1.0f * grad; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] iHidBias is %d\n", iHidBias); printf("bias to hidden%d changed by %9.6f and is now %9.6f\n", j, -1.0f*grad, W[iHidBias]); #endif // next the states for (int k = 0; k < STATE_SIZE; k++) { W[iHidBias + (k+1)*stride] += s[k * stride] * grad; #ifdef DEBUG_GRADIENT_CALC printf("[accumulate_gradient] state %d is at %d\n", k, iHidBias + (k+1)*stride); printf("state%d to hidden%d changed by %9.6f and is now %9.6f\n", k, j, s[k*stride]*grad, W[iHidBias + (k+1)*stride]); #endif } } } DUAL_PREFIX void accumulate_gradient2(float *s, unsigned a, float *theta, float *activation, float *W) { // First, decay all the existing gradients by lambda * gamma for (int i = 0; i < NUM_WGTS; i++) { W[i*LEARN_BLOCK_SIZE] *= dc_lambda * dc_gamma; } // Next, need to add in the new gradient for the specified action. 
// adjust W & theta to point to this action's weights unsigned offset = iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); // theta += offset; // W += offset; theta += offset; W += offset; // for gradients to output node, the gradient equals the activation of the hidden layer node (or bias) // first update the gradient for bias -> output unsigned iOutBias = offsetToOutputBias(LEARN_BLOCK_SIZE, NUM_HIDDEN); W[iOutBias] += -1.0f; // next update the gradients with respect to weights from hidden to output for (int j = 0; j < NUM_HIDDEN; j++) { W[iOutBias + (1+j)*LEARN_BLOCK_SIZE] += activation[j * LEARN_BLOCK_SIZE]; } // update the gradients with respect to the weights from input to hidden for (int j = 0; j < NUM_HIDDEN; j++) { // first the bias weight unsigned iHidBias = offsetToHiddenBias(j, LEARN_BLOCK_SIZE, NUM_HIDDEN); // gradient of output i wrt wgt from input k to hidden j equals // grad(in_j wrt wgt_kj) * grad(activation_j wrt in_j) * grad(output activation wrt activation_j) = // activation_k * activation_j * (1-activation_j) * wgt_ji // The last two terms are only a function of j (and there is only one output node), so // calculate grad to be the last two terms float grad = activation[j*LEARN_BLOCK_SIZE] * (1-activation[j*LEARN_BLOCK_SIZE]) * theta[iOutBias + (1+j)*LEARN_BLOCK_SIZE]; // total gradient is the activation of the input node times grad // The updated value includes eligibility trace of prior gradient W[iHidBias] += -1.0f * grad; // next the states for (int k = 0; k < STATE_SIZE; k++) { W[iHidBias + (k+1)*LEARN_BLOCK_SIZE] += s[k * LEARN_BLOCK_SIZE] * grad; } } } // Update the weights in the neural net (theta's) using back-propagation of the output error // Current activation for the hidden layer is pre-calculated in activation DUAL_PREFIX void update_thetas(float *s, float *theta0, float *W0, float alpha, float error, unsigned stride, unsigned num_hidden, float *activation) { // Repeat for all actions for (int a = 0; a < NUM_ACTIONS; a++) { // adjust 
theta and W to point to start of weights/gradients for this action unsigned offset = iActionStart(a, stride, num_hidden); float *theta = theta0 + offset; float *W = W0 + offset; // First the bias // wgt_j_i += alpha * error * W_ji unsigned iOutBias = offsetToOutputBias(stride, num_hidden); theta[iOutBias] += alpha * error * W[iOutBias]; // if (isnan(theta[iOutBias])){ // printf("theta ISNAN !! added error of %9.6f with alpha of %9.6f\n", error, alpha); // } #ifdef DEBUG_THETA_UPDATE printf("\nupdate_thetas for error of %9.7f\n", error); printf("output bias: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", alpha, error, W[iOutBias], theta[iOutBias]); #endif // next update each weight from hidden nodes to output node for (int j = 0; j < num_hidden; j++) { // wgt_j_i += alpha * error * W_ji theta[iOutBias + (1+j) * stride] += alpha * error * W[iOutBias + (1+j)*stride]; #ifdef DEBUG_THETA_UPDATE printf("hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iOutBias + (1+j)*stride], theta[iOutBias + (1+j)*stride]); #endif } // update weights from input layer to hidden layer for each node in hidden layer for (int j = 0; j < num_hidden; j++) { // first update the bias weight // wgt_k_j = alpha * error * W_k_j unsigned iHidBias = offsetToHiddenBias(j, stride, num_hidden); theta[iHidBias] += alpha * error * W[iHidBias]; #ifdef DEBUG_THETA_UPDATE printf("bias -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iHidBias], theta[iHidBias]); #endif // update the weights from the state nodes for (int k = 0; k < STATE_SIZE; k++) { // wgt_k_j = alpha * error * W_k_j theta[iHidBias + (k+1) * stride] += alpha * error * W[iHidBias + (k+1)*stride]; #ifdef DEBUG_THETA_UPDATE printf("state%d -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", k, j, alpha, error, 
W[iHidBias + (k+1)*stride], theta[iHidBias + (k+1)*stride]); #endif } } } } // theta and activation have stride of LEARN_BLOCK_SIZE, W has stride based on value passed in (# agents) DUAL_PREFIX void update_thetas2(float *theta0, float *W0, float alpha, float error, float *activation) { // Repeat for all actions for (int a = 0; a < NUM_ACTIONS; a++) { // adjust theta and W to point to start of weights/gradients for this action float *theta = theta0 + iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); float *W = W0 + iActionStart(a, LEARN_BLOCK_SIZE, NUM_HIDDEN); // First the bias // wgt_j_i += alpha * error * W_ji unsigned iOutBias = offsetToOutputBias(LEARN_BLOCK_SIZE, NUM_HIDDEN); theta[iOutBias] += alpha * error * W[iOutBias]; // if (isnan(theta[iOutBias])){ // printf("theta ISNAN !! added error of %9.6f with alpha of %9.6f\n", error, alpha); // } #ifdef DEBUG_THETA_UPDATE printf("\nupdate_thetas for error of %9.7f\n", error); printf("output bias: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", alpha, error, W[iOutBias], theta[iOutBias]); #endif // next update each weight from hidden nodes to output node for (int j = 0; j < NUM_HIDDEN; j++) { // wgt_j_i += alpha * error * W_ji theta[iOutBias + (1+j) * LEARN_BLOCK_SIZE] += alpha * error * W[iOutBias + (1+j)*LEARN_BLOCK_SIZE]; #ifdef DEBUG_THETA_UPDATE printf("hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", j, alpha, error, W[iOutBias + (1+j)*LEARN_BLOCK_SIZE], theta[iOutBias + (1+j)*LEARN_BLOCK_SIZE]); #endif } // update weights from input layer to hidden layer for each node in hidden layer for (int j = 0; j < NUM_HIDDEN; j++) { // first update the bias weight // wgt_k_j = alpha * error * W_k_j unsigned iHidBias = offsetToHiddenBias(j, LEARN_BLOCK_SIZE, NUM_HIDDEN); theta[iHidBias] += alpha * error * W[iHidBias]; #ifdef DEBUG_THETA_UPDATE printf("bias -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to 
get new value of %9.7f\n", j, alpha, error, W[iHidBias], theta[iHidBias]); #endif // update the weights from the state nodes for (int k = 0; k < STATE_SIZE; k++) { // wgt_k_j = alpha * error * W_k_j theta[iHidBias + (k+1) * LEARN_BLOCK_SIZE] += alpha * error * W[iHidBias + (k+1)*LEARN_BLOCK_SIZE]; #ifdef DEBUG_THETA_UPDATE printf("state%d -> hidden%d: change is alpha (%9.7f) * error (%9.7f) * gradient (%9.7f) to get new value of %9.7f\n", k, j, alpha, error, W[iHidBias + (k+1)*LEARN_BLOCK_SIZE], theta[iHidBias + (k+1)*LEARN_BLOCK_SIZE]); #endif } } } } //DUAL_PREFIX void update_thetas2(float *s, float *theta0, float *W0, float alpha, float error, unsigned stride_s, unsigned stride_g, unsigned num_hidden, float *activation) //{ // // Repeat for all actions // for (int a = 0; a < NUM_ACTIONS; a++) { // // adjust theta and W to point to start of weights/gradients for this action // unsigned offset = iActionStart(a, stride_g, num_hidden); // float *theta = theta0 + offset; // float *W = W0 + offset; // // // First the bias // unsigned iOutBias = offsetToOutputBias(stride_g, num_hidden); // theta[iOutBias] += alpha * error * W[iOutBias]; // // // next update each weight from hidden nodes to output node // for (int j = 0; j < num_hidden; j++) { // // wgt_j_i += alpha * error * W_ji // theta[iOutBias + (1+j) * stride_g] += alpha * error * W[iOutBias + (1+j)*stride_g]; // } // // // update weights from input layer to hidden layer for each node in hidden layer // for (int j = 0; j < num_hidden; j++) { // // first update the bias weight // // wgt_k_j = alpha * error * W_k_j // unsigned iHidBias = offsetToHiddenBias(j, stride_g, num_hidden); // theta[iHidBias] += alpha * error * W[iHidBias]; // // // update the weights from the state nodes // for (int k = 0; k < STATE_SIZE; k++) { // // wgt_k_j = alpha * error * W_k_j // theta[iHidBias + (k+1) * stride_g] += alpha * error * W[iHidBias + (k+1)*stride_g]; // } // } // } //} // Calculate the Q value for each action from the 
given state, returning the best Q value
// and storing the action in *pAction
// Greedy policy: evaluates calc_Q for every action 0..NUM_ACTIONS-1 and keeps the max;
// ties keep the lowest-numbered action.  Side effect: calc_Q overwrites *activation, so
// on return the activation buffer holds values for the LAST action evaluated, not
// necessarily the chosen one.
DUAL_PREFIX float best_action(float *s, unsigned *pAction, float *theta, unsigned stride, unsigned num_hidden, float *activation)
{
	// calculate Q value for each action
	unsigned best_action = 0;
	float bestQ = calc_Q(s, 0, theta, stride, num_hidden, activation);
	for (int k = 1; k < NUM_ACTIONS; k++) {
		float tempQ = calc_Q(s, k, theta, stride, num_hidden, activation);
		if (tempQ > bestQ) {
			bestQ = tempQ;
			best_action = k;
		}
	}
	*pAction = best_action;
	return bestQ;
}

// Same greedy search as best_action, but evaluates Q with calc_Q2 (weights laid out
// with stride stride_g between consecutive weights of one agent).
DUAL_PREFIX float best_action2(float *s, unsigned *pAction, float *theta, unsigned stride_g, unsigned num_hidden, float *activation)
{
	// calculate Q value for each action
	unsigned best_action = 0;
	float bestQ = calc_Q2(s, 0, theta, stride_g, num_hidden, activation);
	for (int k = 1; k < NUM_ACTIONS; k++) {
		float tempQ = calc_Q2(s, k, theta, stride_g, num_hidden, activation);
		if (tempQ > bestQ) {
			bestQ = tempQ;
			best_action = k;
		}
	}
	*pAction = best_action;
	return bestQ;
}

// theta has stride of 1 and state has stride of stride_s
// Greedy search via calc_Q3; used by test_kernel3 and calc_quality_kernel after an
// agent's weights have been gathered contiguously into shared memory.
DUAL_PREFIX float best_action3(float *s, unsigned *pAction, float *theta, unsigned num_hidden, float *activation, unsigned stride_s)
{
	// calculate Q value for each action
	unsigned best_action = 0;
	float bestQ = calc_Q3(s, 0, theta, num_hidden, activation, stride_s);
	for (int k = 1; k < NUM_ACTIONS; k++) {
		float tempQ = calc_Q3(s, k, theta, num_hidden, activation, stride_s);
		if (tempQ > bestQ) {
			bestQ = tempQ;
			best_action = k;
		}
	}
	*pAction = best_action;
	return bestQ;
}

// choose action from current state, return the Q value for the chosen action
// and store the action in *pAction
// Epsilon-greedy: with probability epsilon draws a second random number to pick a
// uniform random action, otherwise delegates to best_action (so the epsilon test
// and the random action consume separate RNG draws).
// NOTE(review): (unsigned)(r * NUM_ACTIONS) assumes RandUniform returns r in [0,1);
// if r could ever equal 1.0 the index would be NUM_ACTIONS (out of range) -- confirm.
DUAL_PREFIX float choose_action(float *s, unsigned *pAction, float *theta, float epsilon, unsigned stride, unsigned num_hidden, float *activation, unsigned *seeds)
{
	if (epsilon > 0.0f && RandUniform(seeds, stride) < epsilon){
		// choose random action
		float r = RandUniform(seeds, stride);
		*pAction = (unsigned)(r * NUM_ACTIONS);
		return calc_Q(s, *pAction, theta, stride, num_hidden, activation);
	}else{
		// choose the best action
		return best_action(s, pAction, theta, stride, num_hidden, activation);
	}
}

// Epsilon-greedy variant with compiled-in constants: epsilon comes from the device
// constant dc_epsilon and the strides are fixed at LEARN_BLOCK_SIZE / NUM_HIDDEN
// (matches learn_kernel's shared-memory staging of state and weights).
DUAL_PREFIX float choose_action2(float *s, unsigned *pAction, float *theta, float *activation, unsigned *seeds)
{
	unsigned stride_s = LEARN_BLOCK_SIZE;
	if (dc_epsilon > 0.0f && RandUniform(seeds, stride_s) < dc_epsilon){
		// choose random action
		float r = RandUniform(seeds, stride_s);
		*pAction = (unsigned)(r * NUM_ACTIONS);
		return calc_Q2(s, *pAction, theta, LEARN_BLOCK_SIZE, NUM_HIDDEN, activation);
	}else{
		// choose the best action
		return best_action2(s, pAction, theta, LEARN_BLOCK_SIZE, NUM_HIDDEN, activation);
	}
}

//__device__ float choose_action3(unsigned idx, unsigned *s_u, float *s_f)
//{
//	if (dc_epsilon > 0.0f && RandUniform(s_u+idx, LEARN_BLOCK_SIZE) < dc_epsilon){
//		// choose random action
//		float r = RandUniform(s_u+idx, LEARN_BLOCK_SIZE);
//		*pAction = (unsigned)(r * NUM_ACTIONS);
//		return calc_Q4(idx, s_u, s_f);
//	}else{
//		// choose the best action
//		return best_action3(idx, s_u, s_f);
//	}
//}

//DUAL_PREFIX float choose_action2(float *s, unsigned *pAction, float *theta, float epsilon, unsigned stride_g, unsigned num_hidden, float *activation, unsigned *seeds)
//{
//	if (epsilon > 0.0f && RandUniform(seeds, LEARN_BLOCK_SIZE) < epsilon){
//		// choose random action
//		float r = RandUniform(seeds, LEARN_BLOCK_SIZE);
//		*pAction = r * NUM_ACTIONS;
//		return calc_Q2(s, *pAction, theta, stride_g, num_hidden, activation);
//	}else{
//		// choose the best action
//		return best_action2(s, pAction, theta, stride_g, num_hidden, activation);
//	}
//}

// nonzero when the car's position s[0] has reached the goal line at MAX_X
DUAL_PREFIX unsigned terminal_state(float *s)
{
	return s[0] >= MAX_X;
}

// take an action from the current state, s, returning the reward and saving new state in s_prime
// Note, s & s_prime may be the same location.
DUAL_PREFIX float take_action(float *s, unsigned a, float *s_prime, unsigned stride, float *accel) { // Forumlation of mountain car problem is from Sutton & Barto, // "Reinforcement Learning, An Introduction" #ifdef DEBUG_CPU printf("take_action %s from state (%9.4f, %9.4f)\n", string_for_action(a), s[0], s[stride]); #endif // normal reward is -1.0f per time step float reward = -1.0f; // update velocity and limit it to within bounds s_prime[stride] = s[stride] + accel[a] + GRAVITY_FACTOR * cosf(GRAVITY_X_SCALE * s[0]); #ifdef DEBUG_CPU printf("accel is %9.6f from force and %9.6f from gravity resulting in new velocity of %9.6f\n", accel[a], GRAVITY_FACTOR * cosf(GRAVITY_X_SCALE * s[0]), s_prime[stride]); #endif if (s_prime[stride] < MIN_VEL) s_prime[stride] = MIN_VEL; if (s_prime[stride] > MAX_VEL) s_prime[stride] = MAX_VEL; // update position and test for success and limit with minimum bound s_prime[0] = s[0] + s_prime[stride]; if (s_prime[0] >= MAX_X) reward = 0.0f; if (s_prime[0] <= MIN_X) { s_prime[0] = MIN_X; s_prime[stride] = 0.0f;} #ifdef DEBUG_CPU printf("new state is (%9.6f, %9.6f) and reward is %9.6f\n", s_prime[0], s_prime[stride], reward); #endif return reward; } // random number in the specified range DUAL_PREFIX float rand_in_range(unsigned *seeds, unsigned stride, float min, float max) { float r = min + (max-min)*RandUniform(seeds, stride); return r; } // randomize the position and velocity uniformly over their range DUAL_PREFIX void randomize_state(float *s, unsigned *seeds, unsigned stride) { s[0] = rand_in_range(seeds, stride, MIN_X, MAX_X); s[stride] = rand_in_range(seeds, stride, MIN_VEL, MAX_VEL); // s[0] = MIN_X + (MAX_X-MIN_X)*RandUniform(seeds, stride); // s[stride] = MIN_VEL + (MAX_VEL-MIN_VEL)*RandUniform(seeds, stride); } //DUAL_PREFIX void randomize_state2(float *s, unsigned *seeds, unsigned stride_s, unsigned stride_g) //{ // s[0] = rand_in_range(seeds, stride_g, MIN_X, MAX_X); // s[stride_s] = rand_in_range(seeds, stride_g, MIN_VEL, 
MAX_VEL); //} //__device__ void randomize_stateGPU(unsigned ag) //{ // dc_ag.s[ag] = rand_in_range(dc_ag.seeds + ag, dc_p.stride, MIN_X, MAX_X); // dc_ag.s[ag + dc_p.stride] = rand_in_range(dc_ag.seeds + ag, dc_p.stride, MIN_VEL, MAX_VEL); //} // void randomize_all_states(AGENT_DATA *ag) { // randomize state for all agents, deterine first action and set activation values for hidden for (int agent = 0; agent < _p.agents; agent++) { randomize_state(ag->s + agent, ag->seeds + agent, _p.agents); reset_gradient(ag->W + agent, _p.agents, NUM_WGTS); // printf("randomize_state, state is now (%9.6f, %9.6f)\n", ag->s[agent], ag->s[agent + _p.agents]); choose_action(ag->s + agent, ag->action + agent, ag->theta + agent, _p.epsilon, _p.agents, NUM_HIDDEN, ag->activation + agent, ag->seeds + agent); // force activation values to be recalculated for the chosen action // printf("chosen action will be %s\n", string_for_action(ag->action[agent])); calc_Q(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, ag->activation + agent); // update_trace(... 
} } #pragma mark - #pragma mark CPU void set_params(PARAMS p){ _p = p; // fill in some calculated values in the parameters _p.iActionStart[0] = iActionStart(0, p.agents, NUM_HIDDEN); _p.iActionStart[1] = iActionStart(1, p.agents, NUM_HIDDEN); _p.iActionStart[2] = iActionStart(2, p.agents, NUM_HIDDEN); // printf("iActionStart values %d, %d, %d\n", _p.iActionStart[0], _p.iActionStart[1], _p.iActionStart[2]); _p.offsetToOutputBias = offsetToOutputBias(p.agents, NUM_HIDDEN); // printf("_p.agents = %d, _p.hidden_nodes = %d\n", _p.agents, _p.hidden_nodes); } // dump agent data to stdout // uses parameter values in _p // (hard-coded to 2 dimensional state) void dump_agent(AGENT_DATA *ag, unsigned agent, unsigned crude) { printf("[agent %d]: ", agent); printf(" seeds = %u, %u, %u, %u\n", ag->seeds[agent], ag->seeds[agent + _p.agents], ag->seeds[agent + 2*_p.agents], ag->seeds[agent + 3*_p.agents]); printf(" FROM TO THETA W \n"); unsigned i = agent; for (int a = 0; a < NUM_ACTIONS; a++) { for (int h = 0; h < NUM_HIDDEN; h++) { printf("[%6s] bias --> hidden%2d %9.6f %9.6f\n", string_for_action(a), h, ag->theta[i], ag->W[i]); i += _p.agents; printf(" x --> hidden%2d %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; printf(" x' --> hidden%2d %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; } printf( " bias --> output %9.6f %9.6f\n", ag->theta[i], ag->W[i]); i += _p.agents; for (int h = 0; h < NUM_HIDDEN; h++) { printf(" hidden%2d --> output %9.6f %9.6f\n", h, ag->theta[i], ag->W[i]); i += _p.agents; } } printf("fitness = %5.3f alpha = %7.4f\n", ag->fitness[agent]/(crude ? 
CRUDE_NUM_TOT_DIV : NUM_TOT_DIV), ag->alpha[agent]); printf("\nCurrent State: x = %9.6f x' = %9.6f, stored action is %s\n", ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent])); printf(" HIDDEN NODE ACTIVATION\n"); for (int j = 0; j < NUM_HIDDEN; j++) { printf("[%6s] %3d %9.6f\n", string_for_action(ag->action[agent]), j, ag->activation[agent + j * _p.agents]); } printf("\n"); } void dump_agent_pointers(const char *str, AGENT_DATA *ag) { printf("\n===================================================\n%s\n", str); printf("---------------------------------------------------\n", str); printf(" seeds: %p\n", ag->seeds); printf(" theta: %p\n", ag->theta); printf(" W: %p\n", ag->W); printf(" state: %p\n", ag->s); printf("activation: %p\n", ag->activation); printf(" action: %p\n", ag->action); printf(" fitness: %p\n", ag->fitness); printf("====================================================\n\n", str); } // print message and dump all agent data void dump_agents(const char *str, AGENT_DATA *ag, unsigned crude) { printf("\n===================================================\n%s\n", str); printf("---------------------------------------------------\n", str); for (int agent = 0; agent < _p.agents; agent++) { dump_agent(ag, agent, crude); } printf("====================================================\n\n", str); } void dump_one_agent(const char *str, AGENT_DATA *ag, unsigned crude) { printf("%s\n", str); dump_agent(ag, 0, crude); } RESULTS *initialize_results() { RESULTS *r = (RESULTS *)malloc(sizeof(RESULTS)); r->avg_fitness = (float *)malloc(_p.num_tests * sizeof(float)); r->best_fitness = (float *)malloc(_p.num_tests * sizeof(float)); r->best_agent = (unsigned *)malloc(_p.num_tests * sizeof(unsigned)); return r; } void free_results(RESULTS *r) { if (r){ if (r->avg_fitness){ free(r->avg_fitness); r->avg_fitness = NULL;} if (r->best_fitness){ free(r->best_fitness); r->best_fitness = NULL;} if (r->best_agent){ free(r->best_agent); r->best_agent = NULL;} 
free(r); } } void display_results(const char *str, RESULTS *r) { printf("%s \n", str); printf(" TEST Avg Steps\n"); for (int i = 0; i < _p.num_tests; i++) { printf(" [%10d]%8.0f, %8.0f, %8d\n", i*_p.test_interval, r->avg_fitness[i], r->best_fitness[i], r->best_agent[i]); } } // generate random seeds for the sepecified number of agents unsigned *create_seeds(unsigned num_agents) { #ifdef VERBOSE printf("create_seeds for %d agents\n", num_agents); #endif unsigned *seeds = (unsigned *)malloc(num_agents * SEEDS_PER_AGENT * sizeof(unsigned)); for (int i = 0; i < num_agents * SEEDS_PER_AGENT; i++) { seeds[i] = RandUniformui(g_seeds, 1); } return seeds; } // create wgts set initially to random values between theta_min and theta_max float *create_theta(unsigned num_agents, unsigned num_wgts, float theta_min, float theta_max) { #ifdef VERBOSE printf("create_theta for %d agents and %d weights in range %9.7f to %9.7f\n", num_agents, num_wgts, theta_min, theta_max); #endif float *theta = (float *)malloc(num_agents * num_wgts * sizeof(float)); for (int i = 0; i < num_agents * num_wgts; i++) { theta[i] = rand_in_range(g_seeds, 1, theta_min, theta_max); } return theta; } // create gradient trace set initially to 0.0f float *create_W(unsigned num_agents, unsigned num_wgts) { #ifdef VERBOSE printf("create_W for %d agents and %d weights\n", num_agents, num_wgts); #endif float *W = (float *)malloc(num_agents * num_wgts * sizeof(float)); for (int i = 0; i < num_agents * num_wgts; i++) W[i] = 0.0f; return W; } // create initial random states float *create_states(unsigned num_agents, unsigned state_size, unsigned *seeds) { #ifdef VERBOSE printf("create_states for %d agents and state size of %d\n", num_agents, state_size); #endif float *states = (float *)malloc(num_agents * state_size * sizeof(float)); for (int i = 0; i < num_agents * state_size; i++) states[i] = 0.0f; return states; } unsigned *create_actions(unsigned num_agents, unsigned num_actions) { #ifdef VERBOSE 
printf("create_actions for %d agents\n", num_agents); #endif unsigned *actions = (unsigned *)malloc(num_agents * num_actions * sizeof(unsigned)); for (int i = 0; i < num_agents * num_actions; i++) actions[i] = num_actions; // not valid value return actions; } float *create_fitness(unsigned num_agents) { float *fitness = (float *)malloc(num_agents * sizeof(float)); for (int i = 0; i < num_agents; i++) fitness[i] = MAX_FITNESS; return fitness; } float *create_alpha(unsigned num_agents) { float *alpha = (float *)malloc(num_agents * sizeof(float)); for (int i = 0; i < num_agents; i++) alpha[i] = _p.alpha; return alpha; } float *create_activation(unsigned num_agents, unsigned num_hidden) { #ifdef VERBOSE printf("create_activation for %d agents wiht %d hidden nodes\n", num_agents, num_hidden); #endif float *activation = (float *)malloc(num_agents * (num_hidden) * sizeof(float)); for (int i = 0; i < num_agents * num_hidden; i++) activation[i] = 0.0f; return activation; } // initialize agents on CPU, including the initial randomization of state and choice of first action AGENT_DATA *initialize_agentsCPU() { #ifdef VERBOSE printf("initializing agents on CPU...\n"); #endif AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA)); ag->seeds = create_seeds(_p.agents); ag->theta = create_theta(_p.agents, NUM_WGTS, _p.initial_theta_min, _p.initial_theta_max); ag->W = create_W(_p.agents, NUM_WGTS); ag->s = create_states(_p.agents, STATE_SIZE, ag->seeds); ag->action = create_actions(_p.agents, NUM_ACTIONS); ag->activation = create_activation(_p.agents, NUM_HIDDEN); ag->fitness = create_fitness(_p.agents); ag->alpha = create_alpha(_p.agents); randomize_all_states(ag); return ag; } void free_agentsCPU(AGENT_DATA *ag) { #ifdef VERBOSE printf("freeing agents on CPU...\n"); #endif if (ag) { if (ag->seeds){ free(ag->seeds); ag->seeds = NULL;} if (ag->theta){ free(ag->theta); ag->theta = NULL;} if (ag->W){ free(ag->W); ag->W = NULL;} if (ag->s){ free(ag->s); ag->s = NULL;} if 
(ag->action){ free(ag->action); ag->action = NULL;} if (ag->activation){ free(ag->activation); ag->activation = NULL;} if (ag->fitness){ free(ag->fitness); ag->fitness = NULL;} if (ag->alpha) {free(ag->alpha); ag->alpha = NULL;} free(ag); } } /* On entry, the agent data has the current state and chosen action based on current weights. */ void learning_session(AGENT_DATA *ag) { // for each time step for (int t = 0; t < _p.chunk_interval; t++) { #ifdef VERBOSE printf("\n*****************************************\n"); printf( "************ TIME STEP %d ****************\n", t); printf( "*****************************************\n"); #endif // for each agent for (int agent = 0; agent < _p.agents; agent++) { #ifdef DEBUG_CPU printf("[[ AGENT %d ]]\n", agent); #endif // Calculate Q_curr based on current state and action // Activation values will be stored for use in updating the gradient float Q_curr = calc_Q(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, ag->activation + agent); #ifdef DEBUG_CPU printf("Q_curr is %9.6f based on state (%9.6f, %9.6f) and action %s\n", Q_curr, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent])); #endif //accumulate_gradient uses current activations and weights to update the gradient array, W accumulate_gradient(ag->s + agent, ag->action[agent], ag->theta + agent, _p.agents, NUM_HIDDEN, NUM_WGTS, ag->activation + agent, ag->W + agent, _p.lambda, _p.gamma); //#ifdef DUMP_AGENT_UPDATES // dump_agents("after accumulate_gradient", ag); //#endif // take_action will calculate the new state based on the current state and current action, // storing the new state in the agent, returning the reward float reward = take_action(ag->s + agent, ag->action[agent], ag->s + agent, _p.agents, accel); #ifdef DUMP_STATES printf("[AGENT%3d] x = %9.6f x' = %9.6f after action = %s\n", agent, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent])); #endif unsigned success = 
terminal_state(ag->s + agent); if (success){ // printf("success for ageent %d at time step %d\n", agent, t); randomize_state(ag->s + agent, ag->seeds + agent, _p.agents); } // choose the next action, storing it in the agent and returning the Q_next value float Q_next = choose_action(ag->s + agent, ag->action + agent, ag->theta + agent, _p.epsilon, _p.agents, NUM_HIDDEN, ag->activation + agent, ag->seeds + agent); #ifdef DEBUG_CPU printf("Q_next is %12.6f based on state (%9.6f, %9.6f) and action %s\n", Q_next, ag->s[agent], ag->s[agent + _p.agents], string_for_action(ag->action[agent])); #endif float error = reward + _p.gamma*Q_next - Q_curr; // printf("reward + _p.gamma*Q_next = %9.6f, (Q_next is %9.6f), Q_curr = %9.6f, so error is %9.6f\n", reward + _p.gamma*Q_next, Q_next, Q_curr, error); #ifdef DEBUG_CPU printf("error is %12.6f\n", error); #endif update_thetas(ag->s + agent, ag->theta + agent, ag->W + agent, _p.alpha, error, _p.agents, NUM_HIDDEN, ag->activation + agent); //#ifdef DUMP_AGENT_UPDATES // dump_agents("after update_thetas", ag); //#endif if (success) reset_gradient(ag->W + agent, _p.agents, NUM_WGTS); } #ifdef DUMP_AGENT_UPDATES printf("***** end of time step %d *****\n", t); dump_agents("after update_thetas", ag); #endif // update_stored_Q(ag->Q + agent, ag->s + agent, ag->theta + agent, _p.agents, STATE_SIZE, NUM_ACTIONS, NUM_HIDDEN, ag->activation + agent); // update_trace(... 
} } // copy theta valuels from agent iFrom and over-write agent iTo void copy_theta(AGENT_DATA *ag, unsigned iFrom, unsigned iTo, unsigned num_wgts, unsigned stride) { for (int i = 0; i < num_wgts; i++) { ag->theta[iTo + i * stride] = ag->theta[iFrom + i*stride]; } } // share is where the best agents will be selected and duplicated //void share(AGENT_DATA *ag, float share_best_pct, unsigned agent_group_size, unsigned num_agents, unsigned num_wgts) //{ // printf("share...\n"); // for (int group = 0; group < num_agents / agent_group_size; group++) { // unsigned iGroup = group * agent_group_size; // // determine the best agent in this group // unsigned iBest = 0; // float best_fitness = ag->fitness[iGroup]; // for (int a = 1; a < agent_group_size; a++) { // if (ag->fitness[iGroup + a] < best_fitness) { // best_fitness = ag->fitness[iGroup + a]; // iBest = a; // } // } // // printf("agent %d is the best in group %d\n", iGroup + iBest, group); // // // now copy the best agents to the others with probability share_best_pct // for (int a = 0; a < agent_group_size; a++) { // if (a == iBest) continue; // float r = RandUniform(ag->seeds + iGroup + a, num_agents); // if (r < share_best_pct) { // printf("copy weights from agent %d to agent %d\n", iGroup + iBest, iGroup + a); // copy_theta(ag, iBest, iGroup + a, num_wgts, num_agents); // } // } // } //} // test the agents and store the results in the iTest entry in the RESULTS arrays void run_test(AGENT_DATA *ag, unsigned iTest) { float total_steps = 0.0f; float best_fitness = MAX_FITNESS; float save_s[STATE_SIZE]; unsigned save_action; //**TODO** may not need to be saved unsigned save_seeds[4]; static float *junk_activation = NULL; if(!junk_activation) junk_activation = (float *)malloc(NUM_HIDDEN * sizeof(float)); // test all agents and average the result for (int agent = 0; agent < _p.agents; agent++) { #ifdef TRACE_TEST printf("Testing agent %d...\n", agent); #endif // save agent state prior to testing save_s[0] = 
ag->s[agent]; save_s[1] = ag->s[agent + _p.agents]; save_action = ag->action[agent]; save_seeds[0] = ag->seeds[agent]; save_seeds[1] = ag->seeds[agent + _p.agents]; save_seeds[2] = ag->seeds[agent + 2*_p.agents]; save_seeds[3] = ag->seeds[agent + 3*_p.agents]; float agent_steps = 0.0f; for (int rep = 0; rep < _p.test_reps; rep++) { ag->seeds[agent] = save_seeds[0] + rep; ag->seeds[agent + _p.agents] = save_seeds[1] + rep; ag->seeds[agent + 2*_p.agents] = save_seeds[2] + rep; ag->seeds[agent + 3*_p.agents] = save_seeds[3] + rep; randomize_state(ag->s + agent, ag->seeds + agent, _p.agents); int t; unsigned action; for (t = 0; t < _p.test_max; t++) { best_action(ag->s + agent, &action, ag->theta + agent, _p.agents, NUM_HIDDEN, junk_activation); #ifdef TRACE_TEST printf("[test%4d] state = (%9.6f, %9.6f) action will be %s\n", t, ag->s[agent], ag->s[agent + _p.agents], string_for_action(action)); #endif take_action(ag->s + agent, action, ag->s + agent, _p.agents, accel); if (terminal_state(ag->s + agent)) { #ifdef TRACE_TEST printf("Done at step %d!!!\n", t); #endif break; } } #ifdef TRACE_TEST if (t == _p.test_reps) printf("failure\n"); #endif agent_steps += t; } ag->fitness[agent] = agent_steps / _p.test_reps; if (ag->fitness[agent] < best_fitness){ best_fitness = ag->fitness[agent]; // best_agent = agent; } total_steps += agent_steps; //restore state and action ag->s[agent] = save_s[0]; ag->s[agent + _p.agents] = save_s[1]; ag->action[agent] = save_action; ag->seeds[agent] = save_seeds[0]; ag->seeds[agent + _p.agents] = save_seeds[1]; ag->seeds[agent + 2*_p.agents] = save_seeds[2]; ag->seeds[agent + 3*_p.agents] = save_seeds[3]; } #ifdef DUMP_TESTED_AGENTS printf("Testing %d\n", iTest); dump_agents("after testing", ag); #endif // r->avg_fitness[iTest] = total_steps / float(_p.agents) / float(_p.test_reps); // r->best_fitness[iTest] = best_fitness; // r->best_agent[iTest] = best_agent; } void run_CPU(AGENT_DATA *ag) { #ifdef VERBOSE 
printf("\n==============================================\nrunning on CPU...\n"); #endif // dump_agents("run_CPU entry", ag); unsigned timer; CREATE_TIMER(&timer); START_TIMER(timer); timing_feedback_header(_p.num_chunks); for (int i = 0; i < _p.num_chunks; i++) { timing_feedback_dot(i); if ((i > 0) && 0 == (i % _p.chunks_per_restart)){ // printf("randomize all states...\n"); randomize_all_states(ag); #ifdef DUMP_AGENTS_AFTER_RESTART dump_agents("after restart", ag); #endif } if (i == 0) { #ifdef DUMP_INITIAL_AGENTS dump_agents("Initial agents on CPU, prior to learning session", ag); #endif // run_test(ag, r, i); } learning_session(ag); // dump_agents("after learning session", ag); if (0 == ((i+1) % _p.chunks_per_test)) run_test(ag, (i+1)/_p.chunks_per_test); // dump_agents("after testing", ag); // if ((_p.agent_group_size > 1) && 0 == ((i+1) % _p.chunks_per_share)) { // share(ag, _p.share_best_pct, _p.agent_group_size, _p.agents, _p.num_wgts); // } } printf("\n"); STOP_TIMER(timer, "run on CPU"); //#ifdef DUMP_FINAL_AGENTS // dump_agents("Final agents on CPU", ag); //#endif } #pragma mark - #pragma mark GPU // copy agents from device back to host AGENT_DATA *copy_GPU_agents(AGENT_DATA *agGPU) { // printf("copy_GPU_agents\n"); AGENT_DATA *agCopy = (AGENT_DATA *)malloc(sizeof(AGENT_DATA)); // dump_agent_pointers("agGPU", agGPU); // printf(" %d seeds from %p\n", _p.agents * 4, agGPU->seeds); agCopy->seeds = host_copyui(agGPU->seeds, _p.agents * 4); agCopy->theta = host_copyf(agGPU->theta, _p.agents * NUM_WGTS); agCopy->W = host_copyf(agGPU->W, _p.agents * NUM_WGTS); agCopy->s = host_copyf(agGPU->s, _p.agents * STATE_SIZE); agCopy->activation = host_copyf(agGPU->activation, _p.agents * NUM_HIDDEN); agCopy->action = host_copyui(agGPU->action, _p.agents); agCopy->fitness = host_copyf(agGPU->fitness, _p.agents); agCopy->alpha = host_copyf(agGPU->alpha, _p.agents); return agCopy; } // dump the agents from the GPU by first copying to CPU and then dumping the CPU copy void 
dump_agentsGPU(const char *str, AGENT_DATA *agGPU, unsigned crude)
{
	AGENT_DATA *agCopy = copy_GPU_agents(agGPU);
	dump_agents(str, agCopy, crude);
	free_agent_copy(agCopy);	// fix: previously only the struct was freed, leaking every member array
}

void dump_one_agentGPU(const char *str, AGENT_DATA *agGPU, unsigned ag, unsigned crude)
{
	AGENT_DATA *agCopy = copy_GPU_agents(agGPU);
	printf("%s\n", str);
	dump_agent(agCopy, ag, crude);
	free_agent_copy(agCopy);	// fix: free the member arrays too, not just the struct
}

// Free a host-side copy produced by copy_GPU_agents: releases the eight member
// arrays that copy_GPU_agents allocates, then the struct itself.
// NOTE: alphaOn is deliberately NOT freed -- copy_GPU_agents never sets it, so in a
// copy it is an uninitialized pointer.
static void free_agent_copy(AGENT_DATA *agCopy)
{
	if (!agCopy) return;
	free(agCopy->seeds);
	free(agCopy->theta);
	free(agCopy->W);
	free(agCopy->s);
	free(agCopy->activation);
	free(agCopy->action);
	free(agCopy->fitness);
	free(agCopy->alpha);
	free(agCopy);
}

// Copy the provided CPU agent data to the GPU, storing device pointers in a new AGENT_DATA structure
// Also copy the AGENT_DATA and PARAMS structures to constant memory on the device
// Returns a heap-allocated AGENT_DATA whose members are DEVICE pointers; release it
// with free_agentsGPU.  The dc_* device symbols are filled from _p so kernels can
// read the run parameters without a parameter struct.
AGENT_DATA *initialize_agentsGPU(AGENT_DATA *agCPU)
{
#ifdef VERBOSE
	printf("\n==============================================\nrunning on GPU...\n");
#endif
#ifdef VERBOSE
	printf("initializing agents on GPU...\n");
#endif
	AGENT_DATA *ag = (AGENT_DATA *)malloc(sizeof(AGENT_DATA));
	ag->seeds = device_copyui(agCPU->seeds, _p.agents * 4);
	ag->theta = device_copyf(agCPU->theta, _p.agents * NUM_WGTS);
	ag->W = device_copyf(agCPU->W, _p.agents * NUM_WGTS);
	ag->s = device_copyf(agCPU->s, _p.agents * STATE_SIZE);
	ag->activation = device_copyf(agCPU->activation, _p.agents * NUM_HIDDEN);
	ag->action = device_copyui(agCPU->action, _p.agents);
	ag->fitness = device_copyf(agCPU->fitness, _p.agents);
	ag->alpha = device_copyf(agCPU->alpha, _p.agents);
	ag->alphaOn = device_alloc_filledui(_p.agents, 1);

	// fix: pass the __constant__ symbols themselves instead of their names as
	// strings -- the string form of cudaMemcpyToSymbol was deprecated in CUDA 4.1
	// and removed in CUDA 5.0.
//	cudaMemcpyToSymbol(dc_p, &_p, sizeof(PARAMS));
	cudaMemcpyToSymbol(dc_agents, &_p.agents, sizeof(unsigned));
	cudaMemcpyToSymbol(dc_epsilon, &_p.epsilon, sizeof(float));
	cudaMemcpyToSymbol(dc_gamma, &_p.gamma, sizeof(float));
	cudaMemcpyToSymbol(dc_lambda, &_p.lambda, sizeof(float));
	cudaMemcpyToSymbol(dc_alpha, &_p.alpha, sizeof(float));
	cudaMemcpyToSymbol(dc_test_reps, &_p.test_reps, sizeof(unsigned));
	cudaMemcpyToSymbol(dc_test_max, &_p.test_max, sizeof(unsigned));
	cudaMemcpyToSymbol(dc_restart_interval, &_p.restart_interval, sizeof(unsigned));
//	cudaMemcpyToSymbol(dc_share_best_pct, &_p.share_best_pct, sizeof(unsigned));
	cudaMemcpyToSymbol(dc_copy_alpha_multiplier, &_p.copy_alpha_multiplier, sizeof(unsigned));
	// dc_ag receives the struct of DEVICE pointers so kernels can address agent data
	cudaMemcpyToSymbol(dc_ag, ag, sizeof(AGENT_DATA));
	cudaMemcpyToSymbol(dc_accel, accel, 3 * sizeof(float));
//	dump_agent_pointers("agent copied to GPU", ag);
	return ag;
}

// Free the deivce memory pointed to by elements of AGENT_DATA ag, then free ag
void free_agentsGPU(AGENT_DATA *ag)
{
#ifdef VERBOSE
	printf("freeing agents on GPU...\n");
#endif
	if (ag) {
		if (ag->seeds){ deviceFree(ag->seeds); ag->seeds = NULL;}
		if (ag->theta){ deviceFree(ag->theta); ag->theta = NULL;}
		if (ag->W){ deviceFree(ag->W); ag->W = NULL;}
		if (ag->s){ deviceFree(ag->s); ag->s = NULL;}
		if (ag->activation){ deviceFree(ag->activation); ag->activation = NULL;}
		if (ag->action){ deviceFree(ag->action); ag->action = NULL;}
		if (ag->fitness){ deviceFree(ag->fitness); ag->fitness = NULL;}
		if (ag->alpha){ deviceFree(ag->alpha); ag->alpha = NULL;}
		// fix: alphaOn is allocated in initialize_agentsGPU but was never freed here
		// (device memory leak)
		if (ag->alphaOn){ deviceFree(ag->alphaOn); ag->alphaOn = NULL;}
		free(ag);
	}
}

//__global__ void kernel_randomize_all_states()
//{
//}

// Zero the eligibility-trace array W for all agents.
// launch: one thread per element of W (dc_agents * NUM_WGTS total); a 2-D grid of
// 1-D blocks is flattened by the index calculation below.
__global__ void reset_gradient_kernel()
{
	unsigned iGlobal = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x;
	if (iGlobal < dc_agents * NUM_WGTS) dc_ag.W[iGlobal] = 0.0f;
}

// take the number of wins for each agent and convert to a fitness value and store in results
// thread number if the number for the opponent
// wins has the number of wins for each agent with stride against each other agent (dc_agents x dc_agents)
// results and array of size dc_agents where the results for this test go with stride 1
//__global__ void update_fitness_kernel1(float *wins, float *results)
//{
//	unsigned idx = threadIdx.x;
//
//
//}

/*	Run a competition between agents, storing wins - losses in the d_wins array on the device.
	threads per block = the number of competitions between each agent pair = TEST_REPS
	blocks are in a square grid with the number of agents per group on each side
	Agents compete for a maximum of TEST_MAX steps or until one of them reaches finish line.
The score is stored in the results array in the row for agent a1, column for agent a2. */ __global__ void test_kernel3(float *d_wins) { unsigned ag1 = blockIdx.x; unsigned ag2 = blockIdx.y; if (ag1 == ag2){ d_wins[ag1*dc_agents + ag2] = 0; return;} // the entire block may exit here unsigned idx = threadIdx.x; // seeds, state, and # of wins must be kept separate for each competition between the two agents __shared__ unsigned s_seeds[4*TEST3_BLOCK_SIZE]; __shared__ float s_s1[2*TEST3_BLOCK_SIZE]; __shared__ float s_s2[2*TEST3_BLOCK_SIZE]; __shared__ float s_wins[TEST3_BLOCK_SIZE]; // only one copy of thetas is needed for each agent, since it is constant __shared__ float s_theta1[NUM_WGTS]; __shared__ float s_theta2[NUM_WGTS]; // copy seeds from ag1 to seeds[0] and [2] and from ag2 to seeds[1] and seeds[3] // adding in the idx value so each competition has different seeds // s_wins will have +1 for ag1 wins and -1 for ag2 wins and 0 for ties s_seeds[idx] = dc_ag.seeds[ag1] + idx; s_seeds[idx + TEST3_BLOCK_SIZE] = dc_ag.seeds[ag2 + dc_agents] + idx; s_seeds[idx + 2*TEST3_BLOCK_SIZE] = dc_ag.seeds[ag1 + 2*dc_agents] + idx; s_seeds[idx + 3*TEST3_BLOCK_SIZE] = dc_ag.seeds[ag2 + 3*dc_agents] + idx; s_wins[idx] = 0.0f; // this is the number of wins for ag1 minus wins for ag2 // copy thetas for each agent to shared memory // This is a loop because the number of threads my be less than the block size for (int iOffset = 0; iOffset < NUM_WGTS; iOffset += blockDim.x) { if (idx + iOffset < NUM_WGTS){ s_theta1[idx + iOffset] = dc_ag.theta[ag1 + (idx + iOffset) * dc_agents]; s_theta2[idx + iOffset] = dc_ag.theta[ag2 + (idx + iOffset) * dc_agents]; } }; __syncthreads(); // randomize the state for ag1 and copy the same state for ag2 randomize_state(s_s1 + idx, s_seeds + idx, TEST3_BLOCK_SIZE); s_s2[idx] = s_s1[idx]; s_s2[idx + TEST3_BLOCK_SIZE] = s_s1[idx + TEST3_BLOCK_SIZE]; unsigned action1, action2; if (idx < dc_test_reps) { int done1 = 0; int done2 = 0; int t; for (t = 0; t < 
dc_test_max; t++) { if (!done1) { best_action3(s_s1 + idx, &action1, s_theta1, NUM_HIDDEN, NULL, TEST3_BLOCK_SIZE); // best_action2(s_s1 + idx, &action1, dc_ag.theta + ag1, dc_agents, NUM_HIDDEN, NULL); take_action(s_s1 + idx, action1, s_s1 + idx, TEST3_BLOCK_SIZE, dc_accel); if (terminal_state(s_s1 + idx)) { done1 = t+1; } } if (!done2) { best_action3(s_s2 + idx, &action2, s_theta2, NUM_HIDDEN, NULL, TEST3_BLOCK_SIZE); // best_action2(s_s2 + idx, &action2, dc_ag.theta + ag2, dc_agents, dc_p.hidden_nodes, NULL); take_action(s_s2 + idx, action2, s_s2 + idx, TEST3_BLOCK_SIZE, dc_accel); if (terminal_state(s_s2 + idx)) done2 = 1 + t; } if (done1 || done2) break; // stop when either agent is done } if (!done1) done1 = t + 2; if (!done2) done2 = t + 2; if (done1 < done2) s_wins[idx] += 1.0f; if (done1 > done2) s_wins[idx] += -1.0f; } __syncthreads(); // do a reduction on the results unsigned half = TEST3_BLOCK_SIZE / 2; while (half > 0) { if (idx < half && idx + half < dc_test_reps) { s_wins[idx] += s_wins[idx + half]; } half /= 2; __syncthreads(); } // copy the wins to global memory if (idx == 0) { d_wins[ag1 * dc_agents + ag2] = s_wins[0] / dc_test_reps; } } __global__ void learn_kernel(unsigned steps) { unsigned idx = threadIdx.x; unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x; if (iGlobal >= dc_agents) return; __shared__ unsigned s_seeds[5*LEARN_BLOCK_SIZE]; __shared__ float s_s[34*LEARN_BLOCK_SIZE]; unsigned *s_action = s_seeds + 4*LEARN_BLOCK_SIZE; float *s_alpha = s_s + 2*LEARN_BLOCK_SIZE; float *s_theta = s_s + 3*LEARN_BLOCK_SIZE; float *s_W = s_s + 18*LEARN_BLOCK_SIZE; float *s_activation = s_s + 33*LEARN_BLOCK_SIZE; // __shared__ unsigned s_seeds[4*LEARN_BLOCK_SIZE]; // __shared__ unsigned s_action[LEARN_BLOCK_SIZE]; // __shared__ float s_s[2*LEARN_BLOCK_SIZE]; // __shared__ float s_alpha[LEARN_BLOCK_SIZE]; // __shared__ float s_theta[15*LEARN_BLOCK_SIZE]; // __shared__ float s_W[15*LEARN_BLOCK_SIZE]; // __shared__ float 
s_activation[LEARN_BLOCK_SIZE]; // copy state, action, and seeds to shared memory s_s[idx] = dc_ag.s[iGlobal]; s_s[idx + LEARN_BLOCK_SIZE] = dc_ag.s[iGlobal + dc_agents]; s_action[idx] = dc_ag.action[iGlobal]; // unsigned iG = iGlobal; // unsigned ii = idx; unsigned *sSeeds = s_seeds + idx; unsigned *gSeeds = dc_ag.seeds + iGlobal; // s_seeds[idx] = dc_ag.seeds[iGlobal]; // s_seeds[idx + LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + dc_agents]; // s_seeds[idx + 2*LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + 2*dc_agents]; // s_seeds[idx + 3*LEARN_BLOCK_SIZE] = dc_ag.seeds[iGlobal + 3*dc_agents]; // s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // s_seeds[ii] = dc_ag.seeds[iG]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // s_seeds[ii] = dc_ag.seeds[iG]; sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents; sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents; sSeeds[0] = gSeeds[0]; sSeeds+= LEARN_BLOCK_SIZE; gSeeds += dc_agents; sSeeds[0] = gSeeds[0]; s_alpha[idx] = dc_ag.alpha[iGlobal];// * dc_ag.alphaOn[iGlobal]; // copy weights and gradients from global memory to shared memory for (int i = 0, j=0; i < NUM_WGTS*LEARN_BLOCK_SIZE; i +=LEARN_BLOCK_SIZE, j += dc_agents) { s_theta[idx + i] = dc_ag.theta[iGlobal + j]; s_W[idx + i] = dc_ag.W[iGlobal + j]; } s_activation[idx] = dc_ag.activation[iGlobal]; unsigned restart_counter = 0; for (int t = 0; t < steps; t++, restart_counter--) { // reset state at restart intervals if (0 == restart_counter) { randomize_state(s_s + idx, s_seeds + idx, LEARN_BLOCK_SIZE); choose_action2(s_s + idx, s_action + idx, s_theta + idx, s_activation + idx, s_seeds + idx); calc_Q2(s_s + idx, s_action[idx], s_theta + idx, LEARN_BLOCK_SIZE, NUM_HIDDEN, s_activation + idx); reset_gradient(s_W + idx, LEARN_BLOCK_SIZE, NUM_WGTS); restart_counter = dc_restart_interval; } float Q_curr = calc_Q2(s_s + idx, s_action[idx], s_theta + 
idx, LEARN_BLOCK_SIZE, NUM_HIDDEN, s_activation + idx); accumulate_gradient2(s_s + idx, s_action[idx], s_theta + idx, s_activation + idx, s_W + idx); float reward = take_action(s_s + idx, s_action[idx], s_s + idx, LEARN_BLOCK_SIZE, dc_accel); unsigned success = terminal_state(s_s + idx); if (success) randomize_state(s_s + idx, s_seeds + idx, LEARN_BLOCK_SIZE); float Q_next = choose_action2(s_s + idx, s_action + idx, s_theta + idx, s_activation + idx, s_seeds + idx); float error = reward + dc_gamma * Q_next - Q_curr; update_thetas2(s_theta + idx, s_W + idx, s_alpha[idx], error, s_activation + idx); if (success) reset_gradient(s_W + idx, LEARN_BLOCK_SIZE, NUM_WGTS); } // copy state, action and seeds back to global memory dc_ag.action[iGlobal] = s_action[idx]; // iG = iGlobal; // ii = idx; // dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // dc_ag.seeds[iG] = s_seeds[ii]; iG += dc_agents; ii += LEARN_BLOCK_SIZE; // dc_ag.seeds[iG] = s_seeds[ii]; sSeeds = s_seeds + idx; gSeeds = dc_ag.seeds + iGlobal; gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE; gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE; gSeeds[0] = sSeeds[0]; gSeeds += dc_agents; sSeeds += LEARN_BLOCK_SIZE; gSeeds[0] = sSeeds[0]; dc_ag.s[iGlobal] = s_s[idx]; dc_ag.s[iGlobal + dc_agents] = s_s[idx + LEARN_BLOCK_SIZE]; // copy weights and gradients from global memory to shared memory for (int i = 0, j=0; i < NUM_WGTS*LEARN_BLOCK_SIZE; i +=LEARN_BLOCK_SIZE, j += dc_agents) { dc_ag.theta[iGlobal + j] = s_theta[idx + i]; dc_ag.W[iGlobal + j] = s_W[idx + i]; } dc_ag.activation[iGlobal] = s_activation[idx]; } // total x dimension is the agent number __global__ void share_best_kernel(float *d_agent_scores, float threshold, unsigned iBest, unsigned higherIsBetter, float share_pct) { unsigned idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= dc_agents) return; // // if 
this is the best agent, set it's alpha to 0.0f to preserve // // otherwise reset the alpha // if (idx == iBest) dc_ag.alphaOn[idx] = 0; // else dc_ag.alphaOn[idx] = 1; if (idx == iBest) dc_ag.alpha[idx] = 0.0f; else dc_ag.alpha[idx] = dc_alpha; // do nothing if agent has a better score than the threshold // if (d_agent_scores[idx] >= 0.0f) return; if (higherIsBetter && (d_agent_scores[idx] >= threshold)) return; if (!higherIsBetter && (d_agent_scores[idx] <= threshold)) return; // with a probability share_best_pct, copy best agents weights to this agent float r = RandUniform(dc_ag.seeds+idx, dc_agents); if (r < share_pct) { for (int i = 0; i < NUM_WGTS; i++) { dc_ag.theta[idx + i * dc_agents] = dc_ag.theta[iBest + i * dc_agents]; } dc_ag.alpha[idx] = dc_alpha * dc_copy_alpha_multiplier; } } /* x-dimension represents all the possible starting states number of threads is CALC_QUALITY_BLOCK_SIZE (which must be greater than NUM_WGTS) iBest is the agent to be tested maxSteps is the maximum number of time steps before giving up d_steps is where the results are stored for each of the possible starting states */ __global__ void calc_quality_kernel(unsigned iBest, unsigned maxSteps, float *d_steps) { unsigned idx = threadIdx.x; unsigned iGlobal = idx + blockIdx.x * blockDim.x; __shared__ float s_theta[NUM_WGTS]; __shared__ float s_s[2 * CALC_QUALITY_BLOCK_SIZE]; // set up values in shared memory... // ... agent weights // unsigned v_div = iGlobal / NUM_X_DIV; float x_div_num = 0.5f + (float)(iGlobal % NUM_X_DIV); float vel_div_num = 0.5f + (float)(iGlobal / NUM_X_DIV); // float x_div_num = 0.25 + 0.5f*RandUniform(dc_ag.seeds+iBest, dc_agents) + (float)(iGlobal % NUM_X_DIV); // float y_div_num = 0.25 + 0.5f*RandUniform(dc_ag.seeds+iBest, dc_agents) + (float)(iGlobal / NUM_X_DIV); if (idx < NUM_WGTS) s_theta[idx] = dc_ag.theta[iBest + idx * dc_agents]; // ... 
state based on thread and block indexes **TODO this can be modified to have larger blocks of threads if (idx < CALC_QUALITY_BLOCK_SIZE) { s_s[idx] = MIN_X + x_div_num * DIV_X; s_s[idx + CALC_QUALITY_BLOCK_SIZE] = MIN_VEL + vel_div_num * DIV_VEL; } __syncthreads(); unsigned t; unsigned action; for (t = 0; t < maxSteps; t++) { best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CALC_QUALITY_BLOCK_SIZE); take_action(s_s+idx, action, s_s+idx, CALC_QUALITY_BLOCK_SIZE, dc_accel); if (terminal_state(s_s+idx)) break; } d_steps[iGlobal] = (float)(1+t); } /* similar to calc_quality_kernel, but does the calculations for all the agents the x-dimension represents all the possible starting states number of threads is CALC_QUALITY_BLOCK_SIZE (must be greater than NUM_WGTS) the agent number is in blockIdx.y */ __global__ void calc_all_quality_kernel(unsigned maxSteps, float *d_steps) { unsigned idx = threadIdx.x; unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x; unsigned ag = blockIdx.y; __shared__ float s_theta[NUM_WGTS]; // __shared__ float s_s[2*CRUDE_NUM_TOT_DIV]; __shared__ float s_s[2*CALC_QUALITY_BLOCK_SIZE]; // setup values in shared memory... // ... first agent weights float x_div_num = 0.5f + (float)(iGlobal % CRUDE_NUM_X_DIV); float vel_div_num = 0.5f + (float)(iGlobal / CRUDE_NUM_X_DIV); if (idx < NUM_WGTS) s_theta[idx] = dc_ag.theta[ag + idx * dc_agents]; // ... 
then the state based on the x-dimension // if (idx < CRUDE_NUM_TOT_DIV) { if (idx < CALC_QUALITY_BLOCK_SIZE) { s_s[idx] = MIN_X + x_div_num * CRUDE_DIV_X; // s_s[idx + CRUDE_NUM_TOT_DIV] = MIN_VEL + vel_div_num * CRUDE_DIV_VEL; s_s[idx + CALC_QUALITY_BLOCK_SIZE] = MIN_VEL + vel_div_num * CRUDE_DIV_VEL; } __syncthreads(); if (iGlobal >= CRUDE_NUM_TOT_DIV) return; unsigned t; unsigned action; for (t = 0; t < maxSteps; t++) { // best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CRUDE_NUM_TOT_DIV); // take_action(s_s+idx, action, s_s+idx, CRUDE_NUM_TOT_DIV, dc_accel); best_action3(s_s+idx, &action, s_theta, NUM_HIDDEN, NULL, CALC_QUALITY_BLOCK_SIZE); take_action(s_s+idx, action, s_s+idx, CALC_QUALITY_BLOCK_SIZE, dc_accel); if (terminal_state(s_s+idx)) break; } d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = (float)(1+t); // d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = s_s[idx + CRUDE_NUM_TOT_DIV]; // d_steps[ag * CRUDE_NUM_TOT_DIV + iGlobal] = s_s[idx]; } // raw fitness value is the total steps summed over the test starting positions __global__ void copy_fitness_to_agent_kernel(float *d_steps) { unsigned iGlobal = threadIdx.x + blockIdx.x * blockDim.x; if (iGlobal >= dc_agents) return; dc_ag.fitness[iGlobal] = d_steps[iGlobal * CRUDE_NUM_TOT_DIV]; } /* calculate the average quality of an agent by running it for specific starting positions spanning the state space The value returned is the sum of the steps for */ float calc_agent_quality(AGENT_DATA *agGPU, unsigned iBest, float *d_steps, unsigned max_steps) { // // calculate the number of values for x and velocity // unsigned num_x = 1.5f + (MAX_X - MIN_X) / DIV_X; // unsigned num_vel = 1.5f + (MAX_VEL - MIN_VEL) / DIV_VEL; // unsigned num_tot = num_x * num_vel; #ifdef VERBOSE printf("calc_agent_quality for best agent #%d\n using %d x values, %d veloicty values, total of %d values\n", iBest, NUM_X_DIV, NUM_VEL_DIV, NUM_TOT_DIV); #endif // *** TODO increase block dimension to at least 32, calculate the x and velocity 
values in the kernel, // instead of just using the thread and block indexes // dim3 blockDim(NUM_X_DIV); // dim3 gridDim(NUM_VEL_DIV); dim3 blockDim(CALC_QUALITY_BLOCK_SIZE); dim3 gridDim(1 + (NUM_TOT_DIV-1)/CALC_QUALITY_BLOCK_SIZE); // allocate a location to store the number of steps for every trial // float *d_steps = device_allocf(num_tot); // printf("launching calc_quality_kernel with blocks of (%d x %d) and grid of (%d x %d)\n", blockDim.x, blockDim.y, gridDim.x, gridDim.y); PRE_KERNEL2("calc_quality_kernel", blockDim, gridDim); calc_quality_kernel<<<gridDim, blockDim>>>(iBest, max_steps, d_steps); POST_KERNEL("calc_quality_kernel"); #ifdef VERBOSE device_dumpf("steps for each x, velocity value", d_steps, NUM_VEL_DIV, NUM_X_DIV); #endif row_reduce(d_steps, NUM_TOT_DIV, 1); float quality; CUDA_SAFE_CALL(cudaMemcpy(&quality, d_steps, sizeof(float), cudaMemcpyDeviceToHost)); // CUDA_SAFE_CALL(cudaMemcpy(agGPU->fitness + iBest, d_steps, sizeof(float), cudaMemcpyDeviceToDevice)); #ifdef VERBOSE printf("[calc_agent_quality] quality of %d is %7.2f\n", iBest, quality / NUM_TOT_DIV); #endif return quality; } static float *d_bestVal = NULL; static unsigned *d_iBest = NULL; static unsigned *h_iBest = NULL; void describe_crude_divs() { printf("There are %d divs for X of size %6.3f\nThere are %d divs for VEL of size %6.3f\n Total test points equals %d\n", CRUDE_NUM_X_DIV, CRUDE_DIV_X, CRUDE_NUM_VEL_DIV, CRUDE_DIV_VEL, CRUDE_NUM_TOT_DIV); } // calc the quality value for all agents unsigned calc_all_agents_quality(unsigned t, AGENT_DATA *agGPU, float *d_steps) { unsigned best_size = 1 + (_p.agents - 1)/(2*LEARN_BLOCK_SIZE); if (NULL == d_bestVal) d_bestVal = (float *)device_allocf(best_size); if (NULL == d_iBest) d_iBest = (unsigned *)device_allocui(best_size); if (NULL == h_iBest) h_iBest = (unsigned *)malloc(best_size * sizeof(unsigned)); // keep track of the best agent and its precies quality with static variables static int iOldBest = -1; static float oldBestQuality = 
BIG_FLOAT; row_reduce(d_steps, CRUDE_NUM_TOT_DIV, _p.agents); #ifdef VERBOSE device_dumpf("d_steps, after row reduce", d_steps, _p.agents, CRUDE_NUM_TOT_DIV); #endif // increase BLOCK_SIZE for this kernel (?) // ??? why copy all fitness values back to the agent data? It's only needed there if we are going to be sharing // Instead, just do a col_armin to get the best fitness and it's agent number ??? // have to copy the raw fitness value back to the agent data structure since // it will be used to determine the worst agents that can be over-written with copy of best one // use maximum blocksize dim3 blockDim(_p.agents); if (blockDim.x > 512) blockDim.x = 512; dim3 gridDim(1 + (_p.agents - 1) / blockDim.x); PRE_KERNEL("copy_fitness_to_agent_kernel"); copy_fitness_to_agent_kernel<<<gridDim, blockDim>>>(d_steps); POST_KERNEL("copy_fitness_to_agent_kernel"); #ifdef VERBOSE dump_agentsGPU("after copy_fitness_to_agent", agGPU, 1); #endif // determine the best fitness value row_argmin2(agGPU->fitness, _p.agents, 1, d_bestVal, d_iBest); // see if the best agent is a new one // unsigned iBest; unsigned newBestFlag = 0; // printf("copying %d unsigned values from %p on device to %p on host\n", best_size, d_iBest, h_iBest); CUDA_SAFE_CALL(cudaMemcpy(h_iBest, d_iBest, best_size * sizeof(unsigned), cudaMemcpyDeviceToHost)); #ifdef VERBOSE printf("agent %d has the best fitness\n", h_iBest[0]); #endif if (h_iBest[0] != iOldBest) { // we have a possible new best agent! 
// calc the accurate fitness for iBest float iBestQuality = calc_agent_quality(agGPU, h_iBest[0], d_steps, FINAL_QUALITY_MAX_STEPS); if (iBestQuality < oldBestQuality) { // we really do have a new best agent #ifdef VERBOSE printf("new best with quality of %9.1f, old quality was %9.1f\n", iBestQuality, oldBestQuality); #endif newBestFlag = 1; iOldBest = h_iBest[0]; oldBestQuality = iBestQuality; if (_p.dump_all_winners) dump_one_agentGPU("new best agent", agGPU, h_iBest[0], 0); add_to_GPU_result_list(agGPU, h_iBest[0], t, iBestQuality); }else { #ifdef VERBOSE printf("current best agent, %d, is still the best!\n", iOldBest); #endif // the reigning best agent is still the best, but it's fitness has been over-written // in agGPU. This is not a problem except when the agGPU fitness is printed it needs // to be multiplied by NUM_TOT_DIV / CRUDE_NUM_TOT_DIV } } if (newBestFlag || (_p.share_always_pct > 0.0f)) { #ifdef VERBOSE printf("--> going to share the best agent...\n"); #endif // going to share the best agent // need to create an agent score that is negative for agents that might be cloned from the best float avg_fitness = clean_reduce(agGPU->fitness, _p.agents) / _p.agents; #ifdef VERBOSE printf("average fitness is %f\n", avg_fitness / CRUDE_NUM_TOT_DIV); #endif blockDim.x = SHARE_BEST_BLOCK_SIZE; gridDim.x = 1 + (_p.agents - 1) / blockDim.x; PRE_KERNEL("share_best_kernel"); #ifdef VERBOSE printf("avg_fitness is %f and iBest is %d\n", avg_fitness, h_iBest[0]); device_dumpf("fitness values", agGPU->fitness, 1, _p.agents); #endif // share_best_kernel<<<gridDim, blockDim>>>(agGPU->fitness, avg_fitness, h_iBest[0], 0, newBestFlag ? _p.share_best_pct : _p.share_always_pct); share_best_kernel<<<gridDim, blockDim>>>(agGPU->fitness, avg_fitness, iOldBest, 0, newBestFlag ? 
_p.share_best_pct : _p.share_always_pct); POST_KERNEL("share_best_kernel"); #ifdef VERBOSE dump_agentsGPU("after share_best_kernel", agGPU, 1); #endif } // deviceFree(d_bestVal); // deviceFree(d_iBest); return h_iBest[0]; } /* determine the new best agent based on the new winner, with a possible fitness comparison returns 1 if the best agent is new and always sets the value of pBest to the reigning best agent */ unsigned determine_new_best(AGENT_DATA *agGPU, unsigned *d_iWinner, unsigned *pBest, float * pBestFitness, float *d_steps) { static int iBest = -1; // will hold the current best agent static float iBestQuality = BIG_FLOAT; // has the fitness value of current best agent unsigned iWinner; CUDA_SAFE_CALL(cudaMemcpy(&iWinner, d_iWinner, sizeof(unsigned), cudaMemcpyDeviceToHost)); if (iWinner == iBest){ #ifdef VERBOSE printf("best agent, %d, won the competition, nothing new here\n", iWinner); #endif *pBest = iBest; // nothing new here }else{ #ifdef VERBOSE printf("%d won the competition!!!\n", iWinner); #endif if (_p.dump_all_winners) dump_one_agentGPU("competition winner", agGPU, iWinner, 0); // The competition winner is different than the current best agent. if (_p.share_fitness) { // check fitness of winner and compare to fitness of best agent float winnerQuality = calc_agent_quality(agGPU, iWinner, d_steps, FINAL_QUALITY_MAX_STEPS); #ifdef VERBOSE printf("quality of %d is %f\n", iWinner, winnerQuality); #endif if (winnerQuality >= iBestQuality){ #ifdef VERBOSE printf("%d is not good enough to become the new best\n", iWinner); #endif *pBest = iBest; // no change because winner has worse quality than current best }else { #ifdef VERBOSE printf("%d is the new best!!! (replacing %d)\n", iWinner, iBest); #endif if (_p.dump_all_new_best) dump_one_agentGPU("new best agent", agGPU, iWinner, 0); *pBest = iWinner; // the winner is better than the current best!! 
*pBestFitness = winnerQuality; iBestQuality = winnerQuality; // save the information } }else { // calc quality for information purposes if (iWinner != iBest){ calc_agent_quality(agGPU, iWinner, d_steps, FINAL_QUALITY_MAX_STEPS); if (_p.dump_all_new_best) dump_one_agentGPU("new best agent", agGPU, iWinner, 0); } // no fitness check, the winner automatically becomes the best *pBest = iWinner; } } unsigned newBestFlag = (iBest != *pBest); iBest = *pBest; // remember the best agent for next time return newBestFlag; } /* Reduce the results of the competition to determine the winner and record the information. If the winner is not the current best agent then... if _p.share_fitness is false, the competition winner becomes the best agent if _p.share_fitness is true, calculate fitness of winner and if better than current best agent, the winner becomes the new best agent. If the best agent is different, or the _p.always_share flag is set, then only copy best agent over the losers, using probability _p.share_best_pct d_wins is an (_p.agents x _p.agents) array on the device with the results of the round-robin d_agent_scores will be filled in with the net score for each agent d_steps is a temporary working area on device for use by calc_quality Strategy: All agents with a non-negative row score will be preserved agents with row score < zero will be copied from the best agent with probability _p.share_best_pct */ void share_after_competition(unsigned t, AGENT_DATA *agGPU, unsigned *pBest, float *d_wins, float *d_agent_scores, float *d_steps) { // Determine who won the competition #ifdef VERBOSE printf("sharing after competition... 
\n"); device_dumpf("agent scores from competition", d_wins, _p.agents, _p.agents); #endif // first accumulate the column totals of d_wins times -1 and store it in d_agent_scores col_reduce_x_k(d_wins, d_agent_scores, _p.agents, _p.agents, -1.0f); // next, calculate the row totals, keeping the total in column 0 of d_wins row_reduce(d_wins, _p.agents, _p.agents); // add row totals to the column totals in d_agent_scores vsum(d_agent_scores, d_wins, _p.agents, _p.agents); #ifdef VERBOSE device_dumpf("agent total score", d_agent_scores, 1, _p.agents); #endif float *d_winnerVal; unsigned *d_iWinner; row_argmax(d_agent_scores, _p.agents, 1, &d_winnerVal, &d_iWinner); // d_iWinner now contains the agent that won the competition // Determine if there is a new best agent, and record the best agent (whoever it is) in *pBest float newBestFitness; unsigned newBestFlag = determine_new_best(agGPU, d_iWinner, pBest, &newBestFitness, d_steps); if (newBestFlag) add_to_GPU_result_list(agGPU, *pBest, t, newBestFitness); // if there is a new best agent, or if SHARE_ALWAYS is on, then share the if (newBestFlag || _p.share_always_pct > 0.0f) { printf("%d is the new best agent\n", *pBest); dim3 blockDim(SHARE_BEST_BLOCK_SIZE); dim3 gridDim(1 + (_p.agents-1)/SHARE_BEST_BLOCK_SIZE); PRE_KERNEL2("share_best_kernel", blockDim, gridDim); share_best_kernel<<<gridDim, blockDim>>>(d_agent_scores, 0.0f, *pBest, 1, newBestFlag ? 
_p.share_best_pct : _p.share_always_pct); POST_KERNEL("share_best_kernel"); #ifdef VERBOSE dump_agentsGPU("after sharing", agGPU, 1); #endif } deviceFree(d_winnerVal); deviceFree(d_iWinner); } unsigned iBest; // will hold the best agent value // run on the GPU, storing results in the RESULTS array provided void run_GPU(AGENT_DATA *agGPU) { // prepare the place to store results of run prepare_GPU_result_list(_p.num_tests / 2, _p.dump_updates); // on entry, device pointers are stored in dc_ag for agent data, and // parameters are stored in dc_p // dump_agentsGPU("run_GPU entry", agGPU); // allocate memory on device to hold results float *d_results = device_allocf(_p.agents * _p.num_tests); // allocate a temporary area on device to hold the steps array for the quality calculation // if doing competition, then only need room for one agent with NUM_TOT_DIV values // if doing competion, must be able to hold the greater of NUM_TOT_DIV and // _p.agents * CRUDE_NUM_TOT_DIV unsigned steps_size = NUM_TOT_DIV; if (!_p.share_compete && (steps_size < (CRUDE_NUM_TOT_DIV * _p.agents))) steps_size = CRUDE_NUM_TOT_DIV * _p.agents; printf("NUM_TOT_DIV is %d, CRUDE_NUM_TOT_DIV is %d, num agents is %d, so size of d_steps is %d\n", NUM_TOT_DIV, CRUDE_NUM_TOT_DIV, _p.agents, steps_size); float *d_steps = device_allocf(steps_size); // allocate memory on device to hold temporary wins and temporary agent scores float *d_wins = device_allocf(_p.agents * _p.agents); float *d_agent_scores = device_allocf(_p.agents); // calculate block and grid sizes for kernels // learning kernel used the value in LEARN_BLOCK_SIZE. // The entire x dimension is the agent number. // The y dimension is not used. dim3 learnBlockDim(LEARN_BLOCK_SIZE); dim3 learnGridDim(1 + (_p.agents-1) / LEARN_BLOCK_SIZE); // test3 runs the competition between agents to determine the best agent. // The thread x value is the test number. The number of test repititions must be // less than or equal ot the TEST3_MAX_BLOCK_SIZE value. 
// The block x and y values are the agent numbers for the two competing agents. dim3 test3BlockDim(_p.test_reps); if (_p.agents > 65535) printf("***** too many agents for round-robin competition *****"); dim3 test3GridDim(_p.agents, _p.agents); // reset gradient kernel has total number of threads equal to the gradient values dim3 resetGradientBlockDim(512); dim3 resetGradientGridDim(1 + (_p.agents * NUM_WGTS - 1) / 512); if (resetGradientGridDim.x > 65535) { resetGradientGridDim.y = 1 + (resetGradientGridDim.x - 1) / 65535; resetGradientGridDim.x = 1 + (resetGradientGridDim.x - 1) / resetGradientGridDim.y; } // set up timing values CUDA_EVENT_PREPARE; float timeLearn = 0.0f; // learning kernel float timeTest = 0.0f; // competition float timeShare = 0.0f; // all the work for sharing results (except the competition) float timeFitCalc = 0.0f; // testLogTime will hold the time recorded at the start of each test and at the end of the run float *testLogTime = (float *)malloc(_p.num_tests*sizeof(float)); unsigned timerGPU; CREATE_TIMER(&timerGPU); START_TIMER(timerGPU); timing_feedback_header(_p.num_tests); for (int i = 0; i < _p.num_tests; i++) { timing_feedback_dot(i); #ifdef VERBOSE printf("\n**************************** main loop %d ************************\n", i); #endif // do some learning CUDA_EVENT_START; PRE_KERNEL2("learn_kernel", learnBlockDim, learnGridDim); learn_kernel<<<learnGridDim, learnBlockDim>>>(_p.test_interval); POST_KERNEL("learn_kernel"); CUDA_EVENT_STOP(timeLearn); // dump_agentsGPU("after learning session", agGPU); // run tests and sharing // if (0 == ((i+1) % _p.chunks_per_test)) { if (_p.share_compete) { // printf("running competition..."); CUDA_EVENT_START; PRE_KERNEL2("test_kernel3", test3BlockDim, test3GridDim); test_kernel3<<<test3GridDim, test3BlockDim>>>(d_wins); POST_KERNEL("test_kernel3"); CUDA_EVENT_STOP(timeTest); // dump_agentsGPU("after testing, before sharing", agGPU); CUDA_EVENT_START share_after_competition((i+1) * 
_p.test_interval, agGPU, &iBest, d_wins, d_agent_scores, d_steps); CUDA_EVENT_STOP(timeShare); // dump_agentsGPU("after sharing", agGPU); }else if (_p.share_fitness) { // total x coordinate is the index for the point in state space, // the block's y dimension is the agent number dim3 fitCalcBlockDim(CALC_QUALITY_BLOCK_SIZE); dim3 fitCalcGridDim(1 + (CRUDE_NUM_TOT_DIV-1)/CALC_QUALITY_BLOCK_SIZE, _p.agents); // dump_agentsGPU("prior to calc_all_quality_kernel", agGPU, 1); CUDA_EVENT_START PRE_KERNEL2("calc_all_quality_kernel", fitCalcBlockDim, fitCalcGridDim); calc_all_quality_kernel<<<fitCalcGridDim, fitCalcBlockDim>>>(MAX_STEPS_FOR_QUALITY, d_steps); POST_KERNEL("calc_all_quality_kernel"); CUDA_EVENT_STOP(timeFitCalc); // describe_crude_divs(); // device_dumpf("d_steps", d_steps, _p.agents, CRUDE_NUM_TOT_DIV); CUDA_EVENT_START iBest = calc_all_agents_quality((i+1) * _p.test_interval, agGPU, d_steps); CUDA_EVENT_STOP(timeShare); // dump_agentsGPU("after sharing", agGPU); } } printf("\n"); CUDA_EVENT_CLEANUP; STOP_TIMER(timerGPU, "total GPU time"); PRINT_TIME(timeLearn, "learn time"); PRINT_TIME(timeTest, "test time"); PRINT_TIME(timeShare, "share time"); PRINT_TIME(timeFitCalc, "calc fitness time"); #ifdef DUMP_FINAL_AGENTS dump_agentsGPU("--------------------------------------\n Ending Agent States\n", agGPU, 1); #endif if (_p.dump_best){ dump_one_agentGPU("Best Agent on GPU:", agGPU, last_agent_on_GPU_result_list(), 1); printf("quality based on %d MAX_ITERATIONS and (%d x %d) start states is %8.3f\n", FINAL_QUALITY_MAX_STEPS, NUM_X_DIV, NUM_VEL_DIV, last_fitness_on_GPU_result_list()); } if (d_results) deviceFree(d_results); if (d_wins) deviceFree(d_wins); if (d_agent_scores) deviceFree(d_agent_scores); if (d_steps) deviceFree(d_steps); if (testLogTime) free(testLogTime); }
57905a58eff06d2d4941e7deb76cae80491e017c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

// Abort (by default) with file/line context when a HIP runtime call fails.
#define cudaSafe(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define numBlocks 2048
#define numThreads 64

inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess) {
        fprintf(stderr,"cudaAssert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Grid-stride kernel: compute the arbitrary-precision starting coordinate
// (x0, y0) of every pixel and store the pair at vals[2*ndx], vals[2*ndx+1].
// Pixel offset within the view is (xNdx, yNdx) * resolution >> DIM_POWER,
// added to (startX, startY).
// NOTE(review): relies on the BigFloat helpers (add/sub/mult/shiftR/init)
// declared in common.h; assumes WIDTH == 2^DIM_POWER -- confirm.
__global__ void init(BigFloat startX, BigFloat startY, BigFloat resolution,
                     BigFloat *vals)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yNdx, xNdx;
    BigFloat *x0;
    BigFloat *y0;
    BigFloat x;
    BigFloat y;
    BigFloat temp;
    BigFloat multTemp;
    while (ndx < WIDTH*HEIGHT) {
        xNdx = ndx & ((1<<DIM_POWER)-1);  // pixel column
        yNdx = ndx >> DIM_POWER;          // pixel row
        x0 = vals + (ndx*2+0);
        y0 = vals + (ndx*2+1);
        add(&startX, shiftR(mult(&resolution, init(&temp, xNdx), &x, &multTemp), DIM_POWER), x0);
        add(&startY, shiftR(mult(&resolution, init(&temp, yNdx), &y, &multTemp), DIM_POWER), y0);
        ndx += gridDim.x * blockDim.x;
    }
}

// Advance every pixel's escape-time iteration from `lastLimit` up to `limit`.
// `originals` holds each pixel's starting point (x0, y0); `vals` holds the
// current iterate (x, y) so work resumes across successive calls; `iters`
// holds the per-pixel iteration count.  lastLimit == 0 means "first pass".
__global__ void iterate(BigFloat *originals, BigFloat *vals, uint32_t *iters,
                        uint32_t lastLimit, uint32_t limit)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t it;
    BigFloat x0;
    BigFloat y0;
    BigFloat x;
    BigFloat y;
    BigFloat xSqr;
    BigFloat ySqr;
    BigFloat temp;
    BigFloat multTemp;
    while (ndx < WIDTH*HEIGHT) {
        x0 = originals[ndx*2+0];
        y0 = originals[ndx*2+1];
        x = vals[ndx*2+0];
        y = vals[ndx*2+1];
        if (lastLimit) {
            it = iters[ndx];
            if (it < lastLimit) {
                // Point already escaped in an earlier pass: nothing to do.
                // BUG FIX: the original bare `continue` skipped the
                // grid-stride increment below, so ndx never changed and the
                // loop spun forever once this path was taken.
                ndx += gridDim.x * blockDim.x;
                continue;
            }
        } else {
            it = 0;
            x = x0;
            y = y0;
        }
        // Iterate z = z^2 + c until |z|^2 > 2 (base2Cmp against 2) or limit.
        while (it < limit &&
               base2Cmp(add(mult(&x, &x, &xSqr, &multTemp),
                            mult(&y, &y, &ySqr, &multTemp), &temp), 2) != GT) {
            (void)add(&y0, shiftL(mult(&x, &y, &temp, &multTemp), 1), &y);  // y = 2*x*y + y0
            (void)add(sub(&xSqr, &ySqr, &temp), &x0, &x);                   // x = x^2 - y^2 + x0
            it++;
        }
        iters[ndx] = it;
        vals[ndx*2+0] = x;
        vals[ndx*2+1] = y;
        ndx += gridDim.x * blockDim.x;
    }
}

// Double-precision fallback: complete escape-time computation in one pass.
__global__ void doubleIter(double startX, double startY, double resolution,
                           uint32_t *iters)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yNdx, xNdx;
    uint32_t it;
    double x0, y0, x, y, xSqr, ySqr;
    while (ndx < WIDTH*HEIGHT) {
        xNdx = ndx & ((1<<DIM_POWER)-1);
        yNdx = ndx >> DIM_POWER;
        it=0;
        x0 = x = startX + xNdx * resolution / WIDTH;
        y0 = y = startY + yNdx * resolution / HEIGHT;
        while ((it < MAX) && ((xSqr = x*x) + (ySqr = y*y) <= 4)) {
            y = 2*x*y + y0;
            x = xSqr - ySqr + x0;
            it++;
        }
        iters[ndx] = it;
        ndx += gridDim.x * blockDim.x;
    }
}

// Host driver: fill iters[WIDTH*HEIGHT] with arbitrary-precision escape
// counts for the view anchored at (x, y) with the given resolution.
void Mandelbrot(data_t x, data_t y, data_t resolution, uint32_t *iters)
{
    BigFloat *originals, *vals;
    uint32_t *cuda;
    const int size = WIDTH * HEIGHT * sizeof(uint32_t);
    cudaSafe(hipMalloc(&cuda, size));
    cudaSafe(hipMalloc(&originals, WIDTH*HEIGHT*2*sizeof(BigFloat)));
    cudaSafe(hipMalloc(&vals, WIDTH*HEIGHT*2*sizeof(BigFloat)));
    // BigFloat locals are large; raise the per-thread stack limit accordingly.
    hipDeviceSetLimit(hipLimitStackSize, 8*sizeof(BigFloat) + 1024);
    hipLaunchKernelGGL(( init), dim3(numBlocks), dim3(numThreads), 0, 0, x, y, resolution, originals);
    cudaSafe(hipPeekAtLastError());
    cudaSafe(hipDeviceSynchronize());
    int lastLimit = 0;
    for (int limit = ITER_STEP; limit <= MAX; limit += ITER_STEP) {
        std::cout<<limit<<std::endl;
        hipLaunchKernelGGL(( iterate), dim3(numBlocks), dim3(numThreads), 0, 0, originals, vals, cuda, lastLimit, limit);
        cudaSafe(hipPeekAtLastError());
        cudaSafe(hipDeviceSynchronize());
        // BUG FIX: lastLimit was never advanced, so every pass re-ran each
        // pixel from iteration 0 instead of resuming from the state saved
        // in `vals`, making the incremental design a no-op.
        lastLimit = limit;
    }
    cudaSafe(hipMemcpy(iters, cuda, size, hipMemcpyDeviceToHost));
    cudaSafe(hipFree(cuda));
    cudaSafe(hipFree(originals));
    cudaSafe(hipFree(vals));
}

// Host driver for the double-precision path.
void DoubleMandelbrot(double startX, double startY, double resolution,
                      uint32_t *iters)
{
    uint32_t *cuda;
    const int size = WIDTH * HEIGHT * sizeof(uint32_t);
    cudaSafe(hipMalloc(&cuda, size));
    hipLaunchKernelGGL(( doubleIter), dim3(numBlocks), dim3(numThreads), 0, 0, startX, startY, resolution, cuda);
    cudaSafe(hipPeekAtLastError());
    cudaSafe(hipMemcpy(iters, cuda, size, hipMemcpyDeviceToHost));
    cudaSafe(hipFree(cuda));
}
57905a58eff06d2d4941e7deb76cae80491e017c.cu
#include "cuda.h"
#include "common.h"

// Abort (by default) with file/line context when a CUDA runtime call fails.
#define cudaSafe(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define numBlocks 2048
#define numThreads 64

inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr,"cudaAssert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Grid-stride kernel: compute the arbitrary-precision starting coordinate
// (x0, y0) of every pixel and store the pair at vals[2*ndx], vals[2*ndx+1].
// Pixel offset within the view is (xNdx, yNdx) * resolution >> DIM_POWER,
// added to (startX, startY).
// NOTE(review): relies on the BigFloat helpers (add/sub/mult/shiftR/init)
// declared in common.h; assumes WIDTH == 2^DIM_POWER -- confirm.
__global__ void init(BigFloat startX, BigFloat startY, BigFloat resolution,
                     BigFloat *vals)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yNdx, xNdx;
    BigFloat *x0;
    BigFloat *y0;
    BigFloat x;
    BigFloat y;
    BigFloat temp;
    BigFloat multTemp;
    while (ndx < WIDTH*HEIGHT) {
        xNdx = ndx & ((1<<DIM_POWER)-1);  // pixel column
        yNdx = ndx >> DIM_POWER;          // pixel row
        x0 = vals + (ndx*2+0);
        y0 = vals + (ndx*2+1);
        add(&startX, shiftR(mult(&resolution, init(&temp, xNdx), &x, &multTemp), DIM_POWER), x0);
        add(&startY, shiftR(mult(&resolution, init(&temp, yNdx), &y, &multTemp), DIM_POWER), y0);
        ndx += gridDim.x * blockDim.x;
    }
}

// Advance every pixel's escape-time iteration from `lastLimit` up to `limit`.
// `originals` holds each pixel's starting point (x0, y0); `vals` holds the
// current iterate (x, y) so work resumes across successive calls; `iters`
// holds the per-pixel iteration count.  lastLimit == 0 means "first pass".
__global__ void iterate(BigFloat *originals, BigFloat *vals, uint32_t *iters,
                        uint32_t lastLimit, uint32_t limit)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t it;
    BigFloat x0;
    BigFloat y0;
    BigFloat x;
    BigFloat y;
    BigFloat xSqr;
    BigFloat ySqr;
    BigFloat temp;
    BigFloat multTemp;
    while (ndx < WIDTH*HEIGHT) {
        x0 = originals[ndx*2+0];
        y0 = originals[ndx*2+1];
        x = vals[ndx*2+0];
        y = vals[ndx*2+1];
        if (lastLimit) {
            it = iters[ndx];
            if (it < lastLimit) {
                // Point already escaped in an earlier pass: nothing to do.
                // BUG FIX: the original bare `continue` skipped the
                // grid-stride increment below, so ndx never changed and the
                // loop spun forever once this path was taken.
                ndx += gridDim.x * blockDim.x;
                continue;
            }
        } else {
            it = 0;
            x = x0;
            y = y0;
        }
        // Iterate z = z^2 + c until |z|^2 > 2 (base2Cmp against 2) or limit.
        while (it < limit &&
               base2Cmp(add(mult(&x, &x, &xSqr, &multTemp),
                            mult(&y, &y, &ySqr, &multTemp), &temp), 2) != GT) {
            (void)add(&y0, shiftL(mult(&x, &y, &temp, &multTemp), 1), &y);  // y = 2*x*y + y0
            (void)add(sub(&xSqr, &ySqr, &temp), &x0, &x);                   // x = x^2 - y^2 + x0
            it++;
        }
        iters[ndx] = it;
        vals[ndx*2+0] = x;
        vals[ndx*2+1] = y;
        ndx += gridDim.x * blockDim.x;
    }
}

// Double-precision fallback: complete escape-time computation in one pass.
__global__ void doubleIter(double startX, double startY, double resolution,
                           uint32_t *iters)
{
    unsigned int ndx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yNdx, xNdx;
    uint32_t it;
    double x0, y0, x, y, xSqr, ySqr;
    while (ndx < WIDTH*HEIGHT) {
        xNdx = ndx & ((1<<DIM_POWER)-1);
        yNdx = ndx >> DIM_POWER;
        it=0;
        x0 = x = startX + xNdx * resolution / WIDTH;
        y0 = y = startY + yNdx * resolution / HEIGHT;
        while ((it < MAX) && ((xSqr = x*x) + (ySqr = y*y) <= 4)) {
            y = 2*x*y + y0;
            x = xSqr - ySqr + x0;
            it++;
        }
        iters[ndx] = it;
        ndx += gridDim.x * blockDim.x;
    }
}

// Host driver: fill iters[WIDTH*HEIGHT] with arbitrary-precision escape
// counts for the view anchored at (x, y) with the given resolution.
void Mandelbrot(data_t x, data_t y, data_t resolution, uint32_t *iters)
{
    BigFloat *originals, *vals;
    uint32_t *cuda;
    const int size = WIDTH * HEIGHT * sizeof(uint32_t);
    cudaSafe(cudaMalloc(&cuda, size));
    cudaSafe(cudaMalloc(&originals, WIDTH*HEIGHT*2*sizeof(BigFloat)));
    cudaSafe(cudaMalloc(&vals, WIDTH*HEIGHT*2*sizeof(BigFloat)));
    // BigFloat locals are large; raise the per-thread stack limit accordingly.
    cudaDeviceSetLimit(cudaLimitStackSize, 8*sizeof(BigFloat) + 1024);
    init<<<numBlocks, numThreads>>>(x, y, resolution, originals);
    cudaSafe(cudaPeekAtLastError());
    cudaSafe(cudaDeviceSynchronize());
    int lastLimit = 0;
    for (int limit = ITER_STEP; limit <= MAX; limit += ITER_STEP) {
        std::cout<<limit<<std::endl;
        iterate<<<numBlocks, numThreads>>>(originals, vals, cuda, lastLimit, limit);
        cudaSafe(cudaPeekAtLastError());
        cudaSafe(cudaDeviceSynchronize());
        // BUG FIX: lastLimit was never advanced, so every pass re-ran each
        // pixel from iteration 0 instead of resuming from the state saved
        // in `vals`, making the incremental design a no-op.
        lastLimit = limit;
    }
    cudaSafe(cudaMemcpy(iters, cuda, size, cudaMemcpyDeviceToHost));
    cudaSafe(cudaFree(cuda));
    cudaSafe(cudaFree(originals));
    cudaSafe(cudaFree(vals));
}

// Host driver for the double-precision path.
void DoubleMandelbrot(double startX, double startY, double resolution,
                      uint32_t *iters)
{
    uint32_t *cuda;
    const int size = WIDTH * HEIGHT * sizeof(uint32_t);
    cudaSafe(cudaMalloc(&cuda, size));
    doubleIter<<<numBlocks, numThreads>>>(startX, startY, resolution, cuda);
    cudaSafe(cudaPeekAtLastError());
    cudaSafe(cudaMemcpy(iters, cuda, size, cudaMemcpyDeviceToHost));
    cudaSafe(cudaFree(cuda));
}
9a25de18bc8dcaa7855f995f40d836f2fbc6d098.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef _EMULTIPLY_KERNEL_H_
#define _EMULTIPLY_KERNEL_H_

#include <hip/hip_runtime.h>
#include <stdio.h>
#include "config.h"

/**
 * @brief element-wise product of two M x N row-major matrices
 *
 * Launched over a 2D grid; each thread computes one output element.
 *
 * @param a first input matrix (device pointer, M*N elements)
 * @param b second input matrix (device pointer, M*N elements)
 * @param c output matrix (device pointer, M*N elements)
 * @param M number of rows
 * @param N number of columns
 */
template <class T>
__global__ void emultiplyKernel(T * a, T * b, T * c, unsigned int M, unsigned int N){
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int column = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard: the grid may overhang the matrix when M or N is not a
    // multiple of the block edge.
    if(row < M && column < N){
        c[row * N + column] = a[row * N + column] * b[row * N + column];
    }
}

/**
 * @brief host wrapper for emultiplyKernel
 *
 * Launches a (ceil(N/tpb) x ceil(M/tpb)) grid of (tpb x tpb) blocks, then
 * synchronizes and reports any launch/execution error via printf.
 *
 * @param threadsPerBlock edge length of each square thread block
 */
template <class T>
void emultiply(T * a, T * b, T * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock){
    dim3 grid((int) ceil(N/(float)threadsPerBlock), (int) ceil(M/(float)threadsPerBlock), 1);
    // BUG FIX: the block was sized with the compile-time THREADS_PER_BLOCK
    // macro while the grid was sized from the threadsPerBlock parameter; any
    // caller passing a different value got an under- or over-covered matrix.
    dim3 block(threadsPerBlock, threadsPerBlock, 1);

    // launch kernel
    hipLaunchKernelGGL(( emultiplyKernel<T>), dim3(grid), dim3(block), 0, 0, a, b, c, M, N);

    // check if launch was successful
    hipError_t cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess)
        printf("emultiply kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr));
}

template void emultiply<int>(int * a, int * b, int * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void emultiply<float>(float * a, float * b, float * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void emultiply<double>(double * a, double * b, double * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock);

#endif
9a25de18bc8dcaa7855f995f40d836f2fbc6d098.cu
#ifndef _EMULTIPLY_KERNEL_H_ #define _EMULTIPLY_KERNEL_H_ #include <cuda_runtime.h> #include <cuda.h> #include <stdio.h> #include "config.h" /** * @brief performs element wise division on the given matrix * * @param a the matrix to be divided * @param b the constant t * @param c output container, to hold the result * @param n [description] */ template <class T> __global__ void emultiplyKernel(T * a, T * b, T * c, unsigned int M, unsigned int N){ int row = blockDim.y * blockIdx.y + threadIdx.y; int column = blockDim.x * blockIdx.x + threadIdx.x; if(row < M && column < N){ c[row * N +column] = a[row * N +column] * b[row * N +column]; } } /* Wrapper function for emultiplyKernel n - array size */ template <class T> void emultiply(T * a, T * b, T * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock){ //printf("Inside emultiply kernel launcher\n"); dim3 grid((int) ceil(N/(float)threadsPerBlock), (int) ceil(M/(float)threadsPerBlock), 1); dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1); // launch kernel emultiplyKernel<T><<<grid, block>>>(a, b, c, M, N); //check if launch was successful cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != CUDA_SUCCESS) printf("emultiply kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); } template void emultiply<int>(int * a, int * b, int * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock); template void emultiply<float>(float * a, float * b, float * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock); template void emultiply<double>(double * a, double * b, double * c, unsigned int M, unsigned int N, unsigned int threadsPerBlock); #endif
879111acaf1d4134bfd0032767807354387dd280.hip
// !!! This is a file automatically generated by hipify!!! #include <benchmark/benchmark.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/device_vector.h> #include <iostream> #include <hipfft.h> #include <thrust/execution_policy.h> struct op_increase_int { __host__ __device__ bool operator()(int i) { return ++i; } }; static void thrust_transform_int(benchmark::State& state) { int N = state.range(0); thrust::host_vector<int> h_vec(N); // transfer data to the device thrust::device_vector<int> d_vec = h_vec; thrust::device_vector<int> d_out(N); for (auto _ : state) { thrust::transform(d_vec.begin(), d_vec.end(), d_out.begin(), op_increase_int()); } // Save statistics state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * N * sizeof(int)); state.SetComplexityN(N); } BENCHMARK(thrust_transform_int)->RangeMultiplier(2)->Range(1<<10, 1<<26)->Complexity(); struct op_increase_complex { __host__ __device__ hipfftComplex operator()(hipfftComplex s) { s.x++; s.y++; return s; } }; static void thrust_transform_complex(benchmark::State& state) { int N = state.range(0); thrust::host_vector<hipfftComplex> h_vec(N); // transfer data to the device thrust::device_vector<hipfftComplex> d_vec = h_vec; thrust::device_vector<hipfftComplex> d_out(N); for (auto _ : state) { thrust::transform(d_vec.begin(), d_vec.end(), d_out.begin(), op_increase_complex()); } // Save statistics state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * N * sizeof(hipfftComplex)); state.SetComplexityN(N); } BENCHMARK(thrust_transform_complex)->RangeMultiplier(2)->Range(1<<10, 1<<26)->Complexity(); BENCHMARK_MAIN() ;
879111acaf1d4134bfd0032767807354387dd280.cu
#include <benchmark/benchmark.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/device_vector.h> #include <iostream> #include <cufft.h> #include <thrust/execution_policy.h> struct op_increase_int { __host__ __device__ bool operator()(int i) { return ++i; } }; static void thrust_transform_int(benchmark::State& state) { int N = state.range(0); thrust::host_vector<int> h_vec(N); // transfer data to the device thrust::device_vector<int> d_vec = h_vec; thrust::device_vector<int> d_out(N); for (auto _ : state) { thrust::transform(d_vec.begin(), d_vec.end(), d_out.begin(), op_increase_int()); } // Save statistics state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * N * sizeof(int)); state.SetComplexityN(N); } BENCHMARK(thrust_transform_int)->RangeMultiplier(2)->Range(1<<10, 1<<26)->Complexity(); struct op_increase_complex { __host__ __device__ cufftComplex operator()(cufftComplex s) { s.x++; s.y++; return s; } }; static void thrust_transform_complex(benchmark::State& state) { int N = state.range(0); thrust::host_vector<cufftComplex> h_vec(N); // transfer data to the device thrust::device_vector<cufftComplex> d_vec = h_vec; thrust::device_vector<cufftComplex> d_out(N); for (auto _ : state) { thrust::transform(d_vec.begin(), d_vec.end(), d_out.begin(), op_increase_complex()); } // Save statistics state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * N * sizeof(cufftComplex)); state.SetComplexityN(N); } BENCHMARK(thrust_transform_complex)->RangeMultiplier(2)->Range(1<<10, 1<<26)->Complexity(); BENCHMARK_MAIN() ;
6a2151ef91e16da43a940a9cf8471b77e65cd1a2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include "kernel.h" #include "headers.h" #include "runtime.cuh" #define TK_NUM 2048 //num. of task in each category #define task (TK_NUM*4) #define MAX_PACKETS 100 #define MAX_INDEX 32768 double my_timer() { struct timeval time; double _ret_val_0; gettimeofday(( & time), 0); _ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0)); return _ret_val_0; } void init_matrix(int **A, int **B, int **C, int **D); void init_filter(float **r, float **Vect_Up, float **Vect_F, float **Vect_H, float **H, float **F, float *y, float **Vect_F_host); void init_des(unsigned char **packet_in); int des_main_ks( uint32 *SK, uint8 *key ); int des_set_key( uint32 *esk, uint32 *dsk, uint8 key1[8], uint8 key2[8], uint8 key3[8]); int main(){ int i, j; int *h_A[TK_NUM], *h_B[TK_NUM], *h_C[TK_NUM], *h_D[TK_NUM]; int *d_A[TK_NUM], *d_B[TK_NUM], *d_C[TK_NUM]; int *h_count[TK_NUM]; int *d_count[TK_NUM]; int *h_count_host[TK_NUM]; float *h_task_indx; float *d_task_indx; float *h_r[TK_NUM],*d_r[TK_NUM]; float *y, *h_H[TK_NUM], *d_H[TK_NUM]; float *h_F[TK_NUM], *d_F[TK_NUM]; float *h_Vect_H[TK_NUM], *d_Vect_H[TK_NUM]; // output of the F float *h_Vect_Dn[TK_NUM], *d_Vect_Dn[TK_NUM]; // output of the down sampler float *h_Vect_Up[TK_NUM], *d_Vect_Up[TK_NUM]; // output of the up sampler float *h_Vect_F[TK_NUM], *d_Vect_F[TK_NUM], *h_Vect_F_host[TK_NUM]; // this is the output of the unsigned char *h_packet_in[TK_NUM], *d_packet_in[TK_NUM]; unsigned char *h_packet_out[TK_NUM], *d_packet_out[TK_NUM]; unsigned char *h_packet_host[TK_NUM]; uint32 *h_des_esk; uint32 *h_des_dsk; uint32 *d_des_esk; uint32 *d_des_dsk; double start_timer, end_timer; //matrix mult. 
for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipHostMalloc(&h_A[i], MSIZE*sizeof(int), hipHostMallocDefault)); checkCudaErrors(hipHostMalloc(&h_B[i], MSIZE*sizeof(int), hipHostMallocDefault)); checkCudaErrors(hipHostMalloc(&h_C[i], MSIZE*sizeof(int), hipHostMallocDefault)); } for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipMalloc(&d_A[i], MSIZE*sizeof(int))); checkCudaErrors(hipMalloc(&d_B[i], MSIZE*sizeof(int))); checkCudaErrors(hipMalloc(&d_C[i], MSIZE*sizeof(int))); h_D[i] = (int*)malloc(sizeof(int)*MSIZE); } // mandelbrot h_task_indx = (float*)malloc(TK_NUM * sizeof(float)); checkCudaErrors(hipMalloc(&d_task_indx, TK_NUM *sizeof(float))); for(i = 0; i < TK_NUM; i++) { h_task_indx[i] = (float)(i/(TK_NUM/2.0)); checkCudaErrors(hipHostMalloc(&h_count[i], n * n *sizeof(int), NULL)); checkCudaErrors(hipMalloc(&d_count[i], n * n *sizeof(int))); h_count_host[i] = (int*)malloc(n * n * sizeof(int)); } //filter bank for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipHostMalloc(&h_r[i], N_sim*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_r[i], N_sim*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_H[i], N_col*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_H[i], N_col*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_F[i], N_col*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_F[i], N_col*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_Vect_H[i], N_sim*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_Vect_H[i], N_sim*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_Vect_Dn[i], (N_sim/N_samp)*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_Vect_Dn[i], (N_sim/N_samp)*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_Vect_Up[i], N_sim*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_Vect_Up[i], N_sim*sizeof(float))); checkCudaErrors(hipHostMalloc(&h_Vect_F[i], N_sim*sizeof(float), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_Vect_F[i], 
N_sim*sizeof(float))); h_Vect_F_host[i] = (float*)malloc(N_sim*sizeof(float)); } y = (float*)malloc(N_sim*sizeof(float)); //DES for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipHostMalloc(&h_packet_in[i], LEN*sizeof(unsigned char), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_packet_in[i], LEN*sizeof(unsigned char))); checkCudaErrors(hipHostMalloc(&h_packet_out[i], LEN*sizeof(unsigned char), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_packet_out[i], LEN*sizeof(unsigned char))); h_packet_host[i] = (unsigned char *) malloc (LEN*sizeof(unsigned char)); } checkCudaErrors(hipHostMalloc(&h_des_esk, 96*sizeof(uint32), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_des_esk, 96*sizeof(uint32))); checkCudaErrors(hipHostMalloc(&h_des_dsk, 96*sizeof(uint32), hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_des_dsk, 96*sizeof(uint32))); /*Generate encryption key*/ des_set_key(h_des_esk, h_des_dsk, DES3_keys[0], DES3_keys[1], DES3_keys[2]); //Init.matrix init_matrix(h_A, h_B, h_C, h_D); //Init filter init_filter(h_r, h_Vect_Up, h_Vect_F, h_Vect_H, h_H, h_F, y, h_Vect_F_host); //Init DES init_des(h_packet_in); //Init runtime runtime_init(); double timer = 0.0; start_timer = my_timer(); //mem copy for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipMemcpyAsync(d_A[i], h_A[i], MSIZE*sizeof(int), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_B[i], h_B[i], MSIZE*sizeof(int), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_r[i], h_r[i], N_sim*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_Vect_Up[i], h_Vect_Up[i], N_sim*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_Vect_F[i], h_Vect_F[i], N_sim*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_Vect_H[i], h_Vect_H[i], N_sim*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_H[i], h_H[i], 
N_col*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_F[i], h_F[i], N_col*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_packet_in[i], h_packet_in[i], LEN*sizeof(unsigned char), hipMemcpyHostToDevice, runtime_stream)); } checkCudaErrors(hipMemcpyAsync(d_task_indx, h_task_indx, TK_NUM*sizeof(float), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_des_esk, h_des_esk, 96*sizeof(uint32), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipMemcpyAsync(d_des_dsk, h_des_dsk, 96*sizeof(uint32), hipMemcpyHostToDevice, runtime_stream)); checkCudaErrors(hipStreamSynchronize(runtime_stream)); end_timer = my_timer(); timer = end_timer - start_timer; printf("Pagoda MultiWrok: #task:%d, #thread:%d\n", task, TDD_NUM); // compute int mult_c, mand_c, filter_c, des_c; mult_c = 0, mand_c = 0, filter_c = 0, des_c = 0; start_timer = my_timer(); for(i = 0; i < task; i++){ switch(i%4){ case 0: taskLaunch(9, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 0, INT, d_A[mult_c], INT, d_B[mult_c], INT, d_C[mult_c], INT, MROW); mult_c ++; break; case 1: taskLaunch(7, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 1, INT, d_count[mand_c], INT, &d_task_indx[mand_c]); mand_c ++; break; case 2: taskLaunch(12, INT, TDD_NUM, INT, 1, INT, 0, INT, 1, INT, 2, FLOAT, d_r[filter_c], FLOAT, d_H[filter_c], FLOAT, d_Vect_H[filter_c], FLOAT, d_Vect_Dn[filter_c], FLOAT, d_Vect_Up[filter_c], FLOAT, d_Vect_F[filter_c], FLOAT, d_F[filter_c]); filter_c ++; break; case 3: taskLaunch(10, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 3, INT32, d_des_esk, INT32, d_des_esk, CHAR, d_packet_in[des_c], CHAR, d_packet_out[des_c], INT, LEN/8); des_c ++; break; } } waitAll(task); end_timer = my_timer(); printf("GPU elapsed time:%lf Sec.\n", end_timer - start_timer); start_timer = my_timer(); // memory copy back for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipMemcpyAsync(h_C[i],d_C[i], MSIZE*sizeof(int), 
hipMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(hipMemcpyAsync(h_count[i], d_count[i], n * n*sizeof(int), hipMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(hipMemcpyAsync(h_Vect_F[i], d_Vect_F[i], N_sim*sizeof(float), hipMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(hipMemcpyAsync(h_packet_out[i], d_packet_out[i], LEN*sizeof(unsigned char), hipMemcpyDeviceToHost, runtime_stream)); } checkCudaErrors(hipStreamSynchronize(runtime_stream)); end_timer = my_timer(); timer += (end_timer - start_timer); printf("Mem. copy time:%lf Sec.\n", timer); runtime_destroy(); runtime_free(); mult_c = 0, mand_c = 0, filter_c = 0, des_c = 0; start_timer = my_timer(); // cpu compute for(i = 0; i < task; i++){ switch(i%4){ case 0: mult(h_A[mult_c], h_B[mult_c], h_D[mult_c], MROW); mult_c ++; break; case 1: h_get_pixel(h_count_host[mand_c], h_task_indx[mand_c]); mand_c++; break; case 2: h_FBCore(h_r[filter_c], h_H[filter_c], h_Vect_H[filter_c], h_Vect_Dn[filter_c], h_Vect_Up[filter_c], h_Vect_F_host[filter_c], h_F[filter_c]); filter_c ++; break; case 3: des_encrypt(h_des_esk, h_des_dsk, h_packet_in[des_c], h_packet_host[des_c], LEN/8); des_c ++; break; } } end_timer = my_timer(); printf("CPU elapsed time:%lf Sec.\n", end_timer - start_timer); //verificiation printf("Verifying\n"); long long flag = 0; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < MSIZE; j++){ if(h_C[i][j] != h_D[i][j]){ printf("Mult, Error:%d, %d\n", h_C[i][j], h_D[i][j]); break; } flag ++; } for(j = 0; j < N_sim; j++){ if(abs(h_Vect_F[i][j]- h_Vect_F_host[i][j]) > 0.1){ printf("Filter Error:%f, %f\n", h_Vect_F[i][j], h_Vect_F_host[i][j], i, j); break; } flag ++; } for(j = 0; j < LEN; j++){ if(h_packet_out[i][j] != h_packet_host[i][j]){ printf("DES Error:%u, %u, %d, %d\n", h_packet_out[i][j], h_packet_host[i][j], i, j); break; } flag ++; } } if(flag == (TK_NUM * MSIZE + TK_NUM * N_sim + TK_NUM * LEN)) printf("Verifying Successfully\n"); //free mem. 
for(i = 0; i < TK_NUM; i++){ checkCudaErrors(hipHostFree(h_A[i])); checkCudaErrors(hipFree(d_A[i])); checkCudaErrors(hipHostFree(h_B[i])); checkCudaErrors(hipFree(d_B[i])); checkCudaErrors(hipHostFree(h_C[i])); checkCudaErrors(hipFree(d_C[i])); checkCudaErrors(hipHostFree(h_count[i])); checkCudaErrors(hipFree(d_count[i])); checkCudaErrors(hipHostFree(h_r[i])); checkCudaErrors(hipFree(d_r[i])); checkCudaErrors(hipHostFree(h_H[i])); checkCudaErrors(hipFree(d_H[i])); checkCudaErrors(hipHostFree(h_F[i])); checkCudaErrors(hipFree(d_F[i])); checkCudaErrors(hipHostFree(h_Vect_H[i])); checkCudaErrors(hipFree(d_Vect_H[i])); checkCudaErrors(hipHostFree(h_Vect_Dn[i])); checkCudaErrors(hipFree(d_Vect_Dn[i])); checkCudaErrors(hipHostFree(h_Vect_Up[i])); checkCudaErrors(hipFree(d_Vect_Up[i])); checkCudaErrors(hipHostFree(h_Vect_F[i])); checkCudaErrors(hipFree(d_Vect_F[i])); checkCudaErrors(hipHostFree(h_packet_in[i])); checkCudaErrors(hipFree(d_packet_in[i])); checkCudaErrors(hipHostFree(h_packet_out[i])); checkCudaErrors(hipFree(d_packet_out[i])); free(h_packet_host[i]); free(h_count_host[i]); free(h_Vect_F_host[i]); } checkCudaErrors(hipFree(d_task_indx)); checkCudaErrors(hipHostFree(h_des_esk)); checkCudaErrors(hipFree(d_des_esk)); checkCudaErrors(hipHostFree(h_des_dsk)); checkCudaErrors(hipFree(d_des_dsk)); free(h_task_indx); free(y); if(hipDeviceReset()== hipSuccess) printf("Reset successful\n"); return 0; } void init_matrix(int **A, int **B, int **C, int **D){ int i, j; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < MSIZE; j++){ A[i][j] = (i%MROW)+1; B[i][j] = (i%MCOL)+1; C[i][j] = 0; D[i][j] = 0; } } } void init_filter(float **r, float **Vect_Up, float **Vect_F, float **Vect_H, float **H, float **F, float *y, float **Vect_F_host){ int i, j; for(i = 0; i < TK_NUM; i++) for(j = 0; j < N_sim; j++){ r[i][j] = j + 0.0001; y[j] = 0; Vect_Up[i][j] = 0; Vect_F[i][j] = 0; Vect_H[i][j]=0; Vect_F_host[i][j] = 0; } for(i = 0; i < TK_NUM; i++) for(j = 0; j < N_col; j++){ H[i][j] = 
0.0001; F[i][j] = 0.0001; } } void init_des(unsigned char **packet_in){ int i, j; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < LEN; j++){ if(j < HEADER_SIZE ){ packet_in[i][j] = headers[i % MAX_PACKETS][j]; }else{ packet_in[i][j] = DES3_init[j%8]; } } } }
6a2151ef91e16da43a940a9cf8471b77e65cd1a2.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include "kernel.h" #include "headers.h" #include "runtime.cuh" #define TK_NUM 2048 //num. of task in each category #define task (TK_NUM*4) #define MAX_PACKETS 100 #define MAX_INDEX 32768 double my_timer() { struct timeval time; double _ret_val_0; gettimeofday(( & time), 0); _ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0)); return _ret_val_0; } void init_matrix(int **A, int **B, int **C, int **D); void init_filter(float **r, float **Vect_Up, float **Vect_F, float **Vect_H, float **H, float **F, float *y, float **Vect_F_host); void init_des(unsigned char **packet_in); int des_main_ks( uint32 *SK, uint8 *key ); int des_set_key( uint32 *esk, uint32 *dsk, uint8 key1[8], uint8 key2[8], uint8 key3[8]); int main(){ int i, j; int *h_A[TK_NUM], *h_B[TK_NUM], *h_C[TK_NUM], *h_D[TK_NUM]; int *d_A[TK_NUM], *d_B[TK_NUM], *d_C[TK_NUM]; int *h_count[TK_NUM]; int *d_count[TK_NUM]; int *h_count_host[TK_NUM]; float *h_task_indx; float *d_task_indx; float *h_r[TK_NUM],*d_r[TK_NUM]; float *y, *h_H[TK_NUM], *d_H[TK_NUM]; float *h_F[TK_NUM], *d_F[TK_NUM]; float *h_Vect_H[TK_NUM], *d_Vect_H[TK_NUM]; // output of the F float *h_Vect_Dn[TK_NUM], *d_Vect_Dn[TK_NUM]; // output of the down sampler float *h_Vect_Up[TK_NUM], *d_Vect_Up[TK_NUM]; // output of the up sampler float *h_Vect_F[TK_NUM], *d_Vect_F[TK_NUM], *h_Vect_F_host[TK_NUM]; // this is the output of the unsigned char *h_packet_in[TK_NUM], *d_packet_in[TK_NUM]; unsigned char *h_packet_out[TK_NUM], *d_packet_out[TK_NUM]; unsigned char *h_packet_host[TK_NUM]; uint32 *h_des_esk; uint32 *h_des_dsk; uint32 *d_des_esk; uint32 *d_des_dsk; double start_timer, end_timer; //matrix mult. 
for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaHostAlloc(&h_A[i], MSIZE*sizeof(int), cudaHostAllocDefault)); checkCudaErrors(cudaHostAlloc(&h_B[i], MSIZE*sizeof(int), cudaHostAllocDefault)); checkCudaErrors(cudaHostAlloc(&h_C[i], MSIZE*sizeof(int), cudaHostAllocDefault)); } for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaMalloc(&d_A[i], MSIZE*sizeof(int))); checkCudaErrors(cudaMalloc(&d_B[i], MSIZE*sizeof(int))); checkCudaErrors(cudaMalloc(&d_C[i], MSIZE*sizeof(int))); h_D[i] = (int*)malloc(sizeof(int)*MSIZE); } // mandelbrot h_task_indx = (float*)malloc(TK_NUM * sizeof(float)); checkCudaErrors(cudaMalloc(&d_task_indx, TK_NUM *sizeof(float))); for(i = 0; i < TK_NUM; i++) { h_task_indx[i] = (float)(i/(TK_NUM/2.0)); checkCudaErrors(cudaHostAlloc(&h_count[i], n * n *sizeof(int), NULL)); checkCudaErrors(cudaMalloc(&d_count[i], n * n *sizeof(int))); h_count_host[i] = (int*)malloc(n * n * sizeof(int)); } //filter bank for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaHostAlloc(&h_r[i], N_sim*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_r[i], N_sim*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_H[i], N_col*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_H[i], N_col*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_F[i], N_col*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_F[i], N_col*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_Vect_H[i], N_sim*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_Vect_H[i], N_sim*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_Vect_Dn[i], (N_sim/N_samp)*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_Vect_Dn[i], (N_sim/N_samp)*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_Vect_Up[i], N_sim*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_Vect_Up[i], N_sim*sizeof(float))); checkCudaErrors(cudaHostAlloc(&h_Vect_F[i], N_sim*sizeof(float), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_Vect_F[i], 
N_sim*sizeof(float))); h_Vect_F_host[i] = (float*)malloc(N_sim*sizeof(float)); } y = (float*)malloc(N_sim*sizeof(float)); //DES for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaHostAlloc(&h_packet_in[i], LEN*sizeof(unsigned char), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_packet_in[i], LEN*sizeof(unsigned char))); checkCudaErrors(cudaHostAlloc(&h_packet_out[i], LEN*sizeof(unsigned char), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_packet_out[i], LEN*sizeof(unsigned char))); h_packet_host[i] = (unsigned char *) malloc (LEN*sizeof(unsigned char)); } checkCudaErrors(cudaHostAlloc(&h_des_esk, 96*sizeof(uint32), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_des_esk, 96*sizeof(uint32))); checkCudaErrors(cudaHostAlloc(&h_des_dsk, 96*sizeof(uint32), cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_des_dsk, 96*sizeof(uint32))); /*Generate encryption key*/ des_set_key(h_des_esk, h_des_dsk, DES3_keys[0], DES3_keys[1], DES3_keys[2]); //Init.matrix init_matrix(h_A, h_B, h_C, h_D); //Init filter init_filter(h_r, h_Vect_Up, h_Vect_F, h_Vect_H, h_H, h_F, y, h_Vect_F_host); //Init DES init_des(h_packet_in); //Init runtime runtime_init(); double timer = 0.0; start_timer = my_timer(); //mem copy for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaMemcpyAsync(d_A[i], h_A[i], MSIZE*sizeof(int), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_B[i], h_B[i], MSIZE*sizeof(int), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_r[i], h_r[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_Vect_Up[i], h_Vect_Up[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_Vect_F[i], h_Vect_F[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_Vect_H[i], h_Vect_H[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_H[i], 
h_H[i], N_col*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_F[i], h_F[i], N_col*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_packet_in[i], h_packet_in[i], LEN*sizeof(unsigned char), cudaMemcpyHostToDevice, runtime_stream)); } checkCudaErrors(cudaMemcpyAsync(d_task_indx, h_task_indx, TK_NUM*sizeof(float), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_des_esk, h_des_esk, 96*sizeof(uint32), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(d_des_dsk, h_des_dsk, 96*sizeof(uint32), cudaMemcpyHostToDevice, runtime_stream)); checkCudaErrors(cudaStreamSynchronize(runtime_stream)); end_timer = my_timer(); timer = end_timer - start_timer; printf("Pagoda MultiWrok: #task:%d, #thread:%d\n", task, TDD_NUM); // compute int mult_c, mand_c, filter_c, des_c; mult_c = 0, mand_c = 0, filter_c = 0, des_c = 0; start_timer = my_timer(); for(i = 0; i < task; i++){ switch(i%4){ case 0: taskLaunch(9, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 0, INT, d_A[mult_c], INT, d_B[mult_c], INT, d_C[mult_c], INT, MROW); mult_c ++; break; case 1: taskLaunch(7, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 1, INT, d_count[mand_c], INT, &d_task_indx[mand_c]); mand_c ++; break; case 2: taskLaunch(12, INT, TDD_NUM, INT, 1, INT, 0, INT, 1, INT, 2, FLOAT, d_r[filter_c], FLOAT, d_H[filter_c], FLOAT, d_Vect_H[filter_c], FLOAT, d_Vect_Dn[filter_c], FLOAT, d_Vect_Up[filter_c], FLOAT, d_Vect_F[filter_c], FLOAT, d_F[filter_c]); filter_c ++; break; case 3: taskLaunch(10, INT, TDD_NUM, INT, 1, INT, 0, INT, 0, INT, 3, INT32, d_des_esk, INT32, d_des_esk, CHAR, d_packet_in[des_c], CHAR, d_packet_out[des_c], INT, LEN/8); des_c ++; break; } } waitAll(task); end_timer = my_timer(); printf("GPU elapsed time:%lf Sec.\n", end_timer - start_timer); start_timer = my_timer(); // memory copy back for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaMemcpyAsync(h_C[i],d_C[i], 
MSIZE*sizeof(int), cudaMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(h_count[i], d_count[i], n * n*sizeof(int), cudaMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(h_Vect_F[i], d_Vect_F[i], N_sim*sizeof(float), cudaMemcpyDeviceToHost, runtime_stream)); checkCudaErrors(cudaMemcpyAsync(h_packet_out[i], d_packet_out[i], LEN*sizeof(unsigned char), cudaMemcpyDeviceToHost, runtime_stream)); } checkCudaErrors(cudaStreamSynchronize(runtime_stream)); end_timer = my_timer(); timer += (end_timer - start_timer); printf("Mem. copy time:%lf Sec.\n", timer); runtime_destroy(); runtime_free(); mult_c = 0, mand_c = 0, filter_c = 0, des_c = 0; start_timer = my_timer(); // cpu compute for(i = 0; i < task; i++){ switch(i%4){ case 0: mult(h_A[mult_c], h_B[mult_c], h_D[mult_c], MROW); mult_c ++; break; case 1: h_get_pixel(h_count_host[mand_c], h_task_indx[mand_c]); mand_c++; break; case 2: h_FBCore(h_r[filter_c], h_H[filter_c], h_Vect_H[filter_c], h_Vect_Dn[filter_c], h_Vect_Up[filter_c], h_Vect_F_host[filter_c], h_F[filter_c]); filter_c ++; break; case 3: des_encrypt(h_des_esk, h_des_dsk, h_packet_in[des_c], h_packet_host[des_c], LEN/8); des_c ++; break; } } end_timer = my_timer(); printf("CPU elapsed time:%lf Sec.\n", end_timer - start_timer); //verificiation printf("Verifying\n"); long long flag = 0; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < MSIZE; j++){ if(h_C[i][j] != h_D[i][j]){ printf("Mult, Error:%d, %d\n", h_C[i][j], h_D[i][j]); break; } flag ++; } for(j = 0; j < N_sim; j++){ if(abs(h_Vect_F[i][j]- h_Vect_F_host[i][j]) > 0.1){ printf("Filter Error:%f, %f\n", h_Vect_F[i][j], h_Vect_F_host[i][j], i, j); break; } flag ++; } for(j = 0; j < LEN; j++){ if(h_packet_out[i][j] != h_packet_host[i][j]){ printf("DES Error:%u, %u, %d, %d\n", h_packet_out[i][j], h_packet_host[i][j], i, j); break; } flag ++; } } if(flag == (TK_NUM * MSIZE + TK_NUM * N_sim + TK_NUM * LEN)) printf("Verifying Successfully\n"); //free mem. 
for(i = 0; i < TK_NUM; i++){ checkCudaErrors(cudaFreeHost(h_A[i])); checkCudaErrors(cudaFree(d_A[i])); checkCudaErrors(cudaFreeHost(h_B[i])); checkCudaErrors(cudaFree(d_B[i])); checkCudaErrors(cudaFreeHost(h_C[i])); checkCudaErrors(cudaFree(d_C[i])); checkCudaErrors(cudaFreeHost(h_count[i])); checkCudaErrors(cudaFree(d_count[i])); checkCudaErrors(cudaFreeHost(h_r[i])); checkCudaErrors(cudaFree(d_r[i])); checkCudaErrors(cudaFreeHost(h_H[i])); checkCudaErrors(cudaFree(d_H[i])); checkCudaErrors(cudaFreeHost(h_F[i])); checkCudaErrors(cudaFree(d_F[i])); checkCudaErrors(cudaFreeHost(h_Vect_H[i])); checkCudaErrors(cudaFree(d_Vect_H[i])); checkCudaErrors(cudaFreeHost(h_Vect_Dn[i])); checkCudaErrors(cudaFree(d_Vect_Dn[i])); checkCudaErrors(cudaFreeHost(h_Vect_Up[i])); checkCudaErrors(cudaFree(d_Vect_Up[i])); checkCudaErrors(cudaFreeHost(h_Vect_F[i])); checkCudaErrors(cudaFree(d_Vect_F[i])); checkCudaErrors(cudaFreeHost(h_packet_in[i])); checkCudaErrors(cudaFree(d_packet_in[i])); checkCudaErrors(cudaFreeHost(h_packet_out[i])); checkCudaErrors(cudaFree(d_packet_out[i])); free(h_packet_host[i]); free(h_count_host[i]); free(h_Vect_F_host[i]); } checkCudaErrors(cudaFree(d_task_indx)); checkCudaErrors(cudaFreeHost(h_des_esk)); checkCudaErrors(cudaFree(d_des_esk)); checkCudaErrors(cudaFreeHost(h_des_dsk)); checkCudaErrors(cudaFree(d_des_dsk)); free(h_task_indx); free(y); if(cudaDeviceReset()== cudaSuccess) printf("Reset successful\n"); return 0; } void init_matrix(int **A, int **B, int **C, int **D){ int i, j; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < MSIZE; j++){ A[i][j] = (i%MROW)+1; B[i][j] = (i%MCOL)+1; C[i][j] = 0; D[i][j] = 0; } } } void init_filter(float **r, float **Vect_Up, float **Vect_F, float **Vect_H, float **H, float **F, float *y, float **Vect_F_host){ int i, j; for(i = 0; i < TK_NUM; i++) for(j = 0; j < N_sim; j++){ r[i][j] = j + 0.0001; y[j] = 0; Vect_Up[i][j] = 0; Vect_F[i][j] = 0; Vect_H[i][j]=0; Vect_F_host[i][j] = 0; } for(i = 0; i < TK_NUM; i++) for(j = 0; 
j < N_col; j++){ H[i][j] = 0.0001; F[i][j] = 0.0001; } } void init_des(unsigned char **packet_in){ int i, j; for(i = 0; i < TK_NUM; i++){ for(j = 0; j < LEN; j++){ if(j < HEADER_SIZE ){ packet_in[i][j] = headers[i % MAX_PACKETS][j]; }else{ packet_in[i][j] = DES3_init[j%8]; } } } }
7408792e756baf1f34b158f3aa927559f43a6eff.hip
// !!! This is a file automatically generated by hipify!!! #include <float.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include "gpu_util.h" #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X, Y) (((X) > (Y)) ? (X) : (Y)) #ifndef EPS # define EPS 1.e-6 #endif /* gpu parameters */ //#define GRID_SIZE 16 //#define BLOCK_SIZE 256 //#define DEBUG #ifdef DEBUG #define DPRINTF(fmt, args...) \ do { \ printf("%s, line %u: " fmt "\r\n", __FUNCTION__, __LINE__ , ##args); \ fflush(stdout); \ } while (0) #else #define DPRINTF(fmt, args...) do{}while(0) #endif #if __CUDA_ARCH__ < 600 __device__ double doubleAtomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ int get_global_tid() { return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x; } double squared_distance(double* ps, double* center, int dim) { double sum = 0; for (int i = 0; i < dim; i++){ double temp = center[i] - ps[i]; sum += temp * temp; } return sum; } __device__ double squared_distance_on_gpu(double* ps, double* center, int dim) { double sum = 0; for (int i = 0; i < dim; i++){ double temp = center[i] - ps[i]; sum += temp * temp; } return sum; } double** create_2D_double_array_on_gpu(int n, int dim) { double **arr; arr = (double **)gpu_alloc(n * sizeof(double*)); for (int i = 0 ; i < n; i++) arr[i] = (double *)gpu_alloc( dim * sizeof(double)); if (arr == NULL ) { fprintf(stderr, "Error in allocation!\n"); exit(-1); } return arr; } double** create_2D_double_array(int n, int dim) { double 
**arr, *temp; temp = (double *)calloc(n * dim, sizeof(double)); arr = (double **)calloc(n, sizeof(double *)); for (int i = 0 ; i < n; i++) arr[i] = temp + i * dim; if (arr == NULL || temp == NULL) { fprintf(stderr, "Error in allocation!\n"); exit(-1); } return arr; } void delete_points(double** ps){ free(ps); ps = NULL; } double** init_centers_kpp(double **ps, int n, int k, int dim){ int i; int curr_k = 0; int first_i; int max, max_i; double *distances_from_centers, *temp_distances; distances_from_centers = (double*) malloc(sizeof(double)*n); double **centers = create_2D_double_array(k,dim); temp_distances = (double*) malloc(sizeof(double)*n); // Initialize with max double for (i = 0; i < n; i++) distances_from_centers[i] = DBL_MAX; srand(time(NULL)); // Choose a first point first_i = rand() % n; DPRINTF("First random index: %d", first_i); memcpy(centers[curr_k], ps[first_i], dim * sizeof(double)); DPRINTF("Point 1: (%f, %f)", ps[first_i][0], ps[first_i][1]); DPRINTF("Center 1: (%f, %f)", centers[curr_k][0], centers[curr_k][1]); while(curr_k < k-1) { max = -1; max_i = -1; for(i=0; i<n; i++){ DPRINTF("New squared_distance: %f and old min squared_distance: %f", squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]); temp_distances[i] = MIN(squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]); if(temp_distances[i] > max){ max = temp_distances[i]; max_i = i; } } memcpy(distances_from_centers, temp_distances, n * sizeof(double)); memcpy(centers[++curr_k], ps[max_i], dim * sizeof(double)); } free(temp_distances); free(distances_from_centers); return centers; } int find_cluster_on_cpu(double* ps, double** centers, int n, int k, int dim) { int cluster = 0; double dist, min = squared_distance(ps, centers[0], dim); for (int i = 1; i < k; i++){ dist = squared_distance(ps, centers[i], dim); if (min > dist){ min = dist; cluster = i; } } return cluster; } __global__ void find_cluster_on_gpu(double *dev_points, double *dev_centers, int n, 
int k, int dim, int *result_clusters) { double min, dist; int cluster_it_belongs; int index = get_global_tid(); int start = index*dim; int end = start + dim; if (index < n){ for (int i = start; i < end; i+=dim){ min = DBL_MAX; for (int j = 0; j < k; j++){ dist = squared_distance_on_gpu(&dev_points[i], &dev_centers[j*dim], dim); if (min > dist){ min = dist; cluster_it_belongs = j; } } result_clusters[index] = cluster_it_belongs; } } } __global__ void count_points_in_clusters_on_gpu(double* dev_points, // Device point data int* dev_points_clusters, // Device point -> cluster int n, int k, int dim, double* dev_centers, // Device center data int* dev_points_in_cluster) { int i, j; int index = get_global_tid(); int start = index; int end = start + 1; // Clear dev_centers in order to save the new_centers there if (index < k){ dev_points_in_cluster[index] = 0; for(j=0; j<dim; j++){ dev_centers[index*dim + j] = 0; } } __syncthreads(); if (index < n){ for (i = start; i < end; i++) { atomicAdd(&dev_points_in_cluster[dev_points_clusters[i]], 1); for (j = 0; j < dim; j++) { doubleAtomicAdd(&(dev_centers[dev_points_clusters[i]*dim + j]), dev_points[i*dim + j]); } } } } __global__ void update_center_on_gpu(int n, int k, int dim, double* dev_centers, int* dev_points_in_cluster){ int i, j; int index = get_global_tid(); int start = index; int end = start + 1; if (index < k){ for (i = start; i < end; i++) { if (dev_points_in_cluster[i]) { for (j = 0; j < dim; j++){ dev_centers[i*dim + j] /= dev_points_in_cluster[i]; } } } } } int main(int argc, char *argv[]) { int n, k, i, j; int dim = 2; double **points; int BLOCK_SIZE = 256; //Default if (argc > 1) BLOCK_SIZE = atoi(argv[1]); //The second input argument should be the dataset filename if (argc > 2) { FILE *in; in = fopen(argv[2], "r"); //Parse file fscanf(in, "%d %d \n", &n ,&k); points = create_2D_double_array(n, dim); for (i =0; i<n; i++) { for (j=0; j<dim; j++) { fscanf(in, "%lf", &points[i][j]); } } fclose(in); //Otherwise 
parse stdin //PS: For large datasets this doesn't work at all } else { // read input scanf("%d %d", &n, &k); points = create_2D_double_array(n, dim); for (i = 0; i < n; i++) { for (j = 0; j < dim; j++) scanf("%lf", &points[i][j]); } } printf("Input Read successfully \n"); // Calculate grid and block sizes int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE; dim3 gpu_grid(grid_size, 1); dim3 gpu_block(BLOCK_SIZE, 1); printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y); printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y); // printf("Shared memory size: %ld bytes\n", shmem_size); clock_t start = clock(); double **centers; printf("Initializing Centers...\n"); centers = init_centers_kpp(points, n, k, dim); printf("Initializing Centers done\n"); // start algorithm double check = 1; double eps = 1.0E-6; int *points_clusters; double **new_centers; new_centers = create_2D_double_array(k, dim); points_clusters = (int *)calloc(n, sizeof(int)); // GPU allocations double *dev_centers, *dev_points; int *dev_points_clusters; int *dev_points_in_cluster; dev_centers = (double *) gpu_alloc(k*dim*sizeof(double)); dev_points = (double *) gpu_alloc(n*dim*sizeof(double)); dev_points_in_cluster = (int *) gpu_alloc(k*sizeof(int)); dev_points_clusters = (int *) gpu_alloc(n*sizeof(int)); printf("GPU allocs done \n"); // Copy points to GPU if (copy_to_gpu(points[0], dev_points, n*dim*sizeof(double)) != 0) { printf("Error in copy_to_gpu points\n"); return -1; } // Copy centers to GPU if (copy_to_gpu(centers[0], dev_centers, k*dim*sizeof(double)) != 0) { printf("Error in copy_to_gpu centers\n"); return -1; } printf("Loop Start \n"); int step = 0; while (check > eps) { // assign points to clusters - step 1 hipLaunchKernelGGL(( find_cluster_on_gpu), dim3(gpu_grid),dim3(gpu_block), 0, 0, dev_points, dev_centers, n, k, dim, dev_points_clusters); hipDeviceSynchronize(); // update means - step 2 // Count points that belong to each cluster hipLaunchKernelGGL(( count_points_in_clusters_on_gpu), 
dim3(gpu_grid),dim3(gpu_block), 0, 0, dev_points, dev_points_clusters, n, k, dim, dev_centers, dev_points_in_cluster); hipDeviceSynchronize(); // Update centers based on counted points hipLaunchKernelGGL(( update_center_on_gpu), dim3(gpu_grid),dim3(gpu_block), 0, 0, n, k, dim, dev_centers, dev_points_in_cluster); hipDeviceSynchronize(); // TODO: centers check in GPU, so we don't copy from gpu each time if (copy_from_gpu(new_centers[0], dev_centers, k*dim*sizeof(double)) != 0) { printf("Error in copy_from_gpu dev_centers\n"); return -1; } // check for convergence for (i = 0; i < k; i++){ for (j = 0; j < dim; j++){ printf("%lf ", new_centers[i][j]); } printf("\n"); } check = 0; for (j = 0; j < k; j++) { check += sqrt(squared_distance(new_centers[j], centers[j], dim)); for (i = 0; i < dim; i++){ centers[j][i] = new_centers[j][i]; } } printf("Step %d , Convergence: %lf \n", step, check); step += 1; //free new_centers // if (step == 5) break; // delete_points(new_centers); } double time_elapsed = (double)(clock() - start) / CLOCKS_PER_SEC; printf("Total Time Elapsed: %lf seconds\n", time_elapsed); FILE *f; //Store Performance metrics //For now just the time elapsed, in the future maybe we'll need memory GPU memory bandwidth etc... f = fopen("log.out", "w"); fprintf(f, "Time Elapsed: %lf ", time_elapsed); fclose(f); // print & save results f = fopen("centers.out", "w"); printf("Centers:\n"); for (i = 0; i < k; i++) { for (j = 0; j < dim; j++){ printf("%lf ", centers[i][j]); fprintf(f, "%lf ", centers[i][j]); } printf("\n"); fprintf(f, "\n"); } fclose(f); //Store Mapping Data in case we need it copy_from_gpu(points_clusters, dev_points_clusters, n*sizeof(int)); f = fopen("point_cluster_map.out", "w"); for (i =0;i<n;i++){ fprintf(f, "%d\n", points_clusters[i]); } fclose(f); // GPU clean gpu_free(dev_centers); gpu_free(dev_points); gpu_free(dev_points_clusters); // clear and exit delete_points(points); delete_points(centers); free(points_clusters); return 0; }
7408792e756baf1f34b158f3aa927559f43a6eff.cu
#include <float.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <cuda.h> #include "gpu_util.h" #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X, Y) (((X) > (Y)) ? (X) : (Y)) #ifndef EPS # define EPS 1.e-6 #endif /* gpu parameters */ //#define GRID_SIZE 16 //#define BLOCK_SIZE 256 //#define DEBUG #ifdef DEBUG #define DPRINTF(fmt, args...) \ do { \ printf("%s, line %u: " fmt "\r\n", __FUNCTION__, __LINE__ , ##args); \ fflush(stdout); \ } while (0) #else #define DPRINTF(fmt, args...) do{}while(0) #endif #if __CUDA_ARCH__ < 600 __device__ double doubleAtomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ int get_global_tid() { return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x; } double squared_distance(double* ps, double* center, int dim) { double sum = 0; for (int i = 0; i < dim; i++){ double temp = center[i] - ps[i]; sum += temp * temp; } return sum; } __device__ double squared_distance_on_gpu(double* ps, double* center, int dim) { double sum = 0; for (int i = 0; i < dim; i++){ double temp = center[i] - ps[i]; sum += temp * temp; } return sum; } double** create_2D_double_array_on_gpu(int n, int dim) { double **arr; arr = (double **)gpu_alloc(n * sizeof(double*)); for (int i = 0 ; i < n; i++) arr[i] = (double *)gpu_alloc( dim * sizeof(double)); if (arr == NULL ) { fprintf(stderr, "Error in allocation!\n"); exit(-1); } return arr; } double** create_2D_double_array(int n, int dim) { double **arr, *temp; temp = (double *)calloc(n * dim, sizeof(double)); arr = 
(double **)calloc(n, sizeof(double *)); for (int i = 0 ; i < n; i++) arr[i] = temp + i * dim; if (arr == NULL || temp == NULL) { fprintf(stderr, "Error in allocation!\n"); exit(-1); } return arr; } void delete_points(double** ps){ free(ps); ps = NULL; } double** init_centers_kpp(double **ps, int n, int k, int dim){ int i; int curr_k = 0; int first_i; int max, max_i; double *distances_from_centers, *temp_distances; distances_from_centers = (double*) malloc(sizeof(double)*n); double **centers = create_2D_double_array(k,dim); temp_distances = (double*) malloc(sizeof(double)*n); // Initialize with max double for (i = 0; i < n; i++) distances_from_centers[i] = DBL_MAX; srand(time(NULL)); // Choose a first point first_i = rand() % n; DPRINTF("First random index: %d", first_i); memcpy(centers[curr_k], ps[first_i], dim * sizeof(double)); DPRINTF("Point 1: (%f, %f)", ps[first_i][0], ps[first_i][1]); DPRINTF("Center 1: (%f, %f)", centers[curr_k][0], centers[curr_k][1]); while(curr_k < k-1) { max = -1; max_i = -1; for(i=0; i<n; i++){ DPRINTF("New squared_distance: %f and old min squared_distance: %f", squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]); temp_distances[i] = MIN(squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]); if(temp_distances[i] > max){ max = temp_distances[i]; max_i = i; } } memcpy(distances_from_centers, temp_distances, n * sizeof(double)); memcpy(centers[++curr_k], ps[max_i], dim * sizeof(double)); } free(temp_distances); free(distances_from_centers); return centers; } int find_cluster_on_cpu(double* ps, double** centers, int n, int k, int dim) { int cluster = 0; double dist, min = squared_distance(ps, centers[0], dim); for (int i = 1; i < k; i++){ dist = squared_distance(ps, centers[i], dim); if (min > dist){ min = dist; cluster = i; } } return cluster; } __global__ void find_cluster_on_gpu(double *dev_points, double *dev_centers, int n, int k, int dim, int *result_clusters) { double min, dist; int 
cluster_it_belongs; int index = get_global_tid(); int start = index*dim; int end = start + dim; if (index < n){ for (int i = start; i < end; i+=dim){ min = DBL_MAX; for (int j = 0; j < k; j++){ dist = squared_distance_on_gpu(&dev_points[i], &dev_centers[j*dim], dim); if (min > dist){ min = dist; cluster_it_belongs = j; } } result_clusters[index] = cluster_it_belongs; } } } __global__ void count_points_in_clusters_on_gpu(double* dev_points, // Device point data int* dev_points_clusters, // Device point -> cluster int n, int k, int dim, double* dev_centers, // Device center data int* dev_points_in_cluster) { int i, j; int index = get_global_tid(); int start = index; int end = start + 1; // Clear dev_centers in order to save the new_centers there if (index < k){ dev_points_in_cluster[index] = 0; for(j=0; j<dim; j++){ dev_centers[index*dim + j] = 0; } } __syncthreads(); if (index < n){ for (i = start; i < end; i++) { atomicAdd(&dev_points_in_cluster[dev_points_clusters[i]], 1); for (j = 0; j < dim; j++) { doubleAtomicAdd(&(dev_centers[dev_points_clusters[i]*dim + j]), dev_points[i*dim + j]); } } } } __global__ void update_center_on_gpu(int n, int k, int dim, double* dev_centers, int* dev_points_in_cluster){ int i, j; int index = get_global_tid(); int start = index; int end = start + 1; if (index < k){ for (i = start; i < end; i++) { if (dev_points_in_cluster[i]) { for (j = 0; j < dim; j++){ dev_centers[i*dim + j] /= dev_points_in_cluster[i]; } } } } } int main(int argc, char *argv[]) { int n, k, i, j; int dim = 2; double **points; int BLOCK_SIZE = 256; //Default if (argc > 1) BLOCK_SIZE = atoi(argv[1]); //The second input argument should be the dataset filename if (argc > 2) { FILE *in; in = fopen(argv[2], "r"); //Parse file fscanf(in, "%d %d \n", &n ,&k); points = create_2D_double_array(n, dim); for (i =0; i<n; i++) { for (j=0; j<dim; j++) { fscanf(in, "%lf", &points[i][j]); } } fclose(in); //Otherwise parse stdin //PS: For large datasets this doesn't work at all } 
else { // read input scanf("%d %d", &n, &k); points = create_2D_double_array(n, dim); for (i = 0; i < n; i++) { for (j = 0; j < dim; j++) scanf("%lf", &points[i][j]); } } printf("Input Read successfully \n"); // Calculate grid and block sizes int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE; dim3 gpu_grid(grid_size, 1); dim3 gpu_block(BLOCK_SIZE, 1); printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y); printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y); // printf("Shared memory size: %ld bytes\n", shmem_size); clock_t start = clock(); double **centers; printf("Initializing Centers...\n"); centers = init_centers_kpp(points, n, k, dim); printf("Initializing Centers done\n"); // start algorithm double check = 1; double eps = 1.0E-6; int *points_clusters; double **new_centers; new_centers = create_2D_double_array(k, dim); points_clusters = (int *)calloc(n, sizeof(int)); // GPU allocations double *dev_centers, *dev_points; int *dev_points_clusters; int *dev_points_in_cluster; dev_centers = (double *) gpu_alloc(k*dim*sizeof(double)); dev_points = (double *) gpu_alloc(n*dim*sizeof(double)); dev_points_in_cluster = (int *) gpu_alloc(k*sizeof(int)); dev_points_clusters = (int *) gpu_alloc(n*sizeof(int)); printf("GPU allocs done \n"); // Copy points to GPU if (copy_to_gpu(points[0], dev_points, n*dim*sizeof(double)) != 0) { printf("Error in copy_to_gpu points\n"); return -1; } // Copy centers to GPU if (copy_to_gpu(centers[0], dev_centers, k*dim*sizeof(double)) != 0) { printf("Error in copy_to_gpu centers\n"); return -1; } printf("Loop Start \n"); int step = 0; while (check > eps) { // assign points to clusters - step 1 find_cluster_on_gpu<<<gpu_grid,gpu_block>>>( dev_points, dev_centers, n, k, dim, dev_points_clusters); cudaDeviceSynchronize(); // update means - step 2 // Count points that belong to each cluster count_points_in_clusters_on_gpu<<<gpu_grid,gpu_block>>>( dev_points, dev_points_clusters, n, k, dim, dev_centers, dev_points_in_cluster); cudaDeviceSynchronize(); 
// Update centers based on counted points update_center_on_gpu<<<gpu_grid,gpu_block>>>( n, k, dim, dev_centers, dev_points_in_cluster); cudaDeviceSynchronize(); // TODO: centers check in GPU, so we don't copy from gpu each time if (copy_from_gpu(new_centers[0], dev_centers, k*dim*sizeof(double)) != 0) { printf("Error in copy_from_gpu dev_centers\n"); return -1; } // check for convergence for (i = 0; i < k; i++){ for (j = 0; j < dim; j++){ printf("%lf ", new_centers[i][j]); } printf("\n"); } check = 0; for (j = 0; j < k; j++) { check += sqrt(squared_distance(new_centers[j], centers[j], dim)); for (i = 0; i < dim; i++){ centers[j][i] = new_centers[j][i]; } } printf("Step %d , Convergence: %lf \n", step, check); step += 1; //free new_centers // if (step == 5) break; // delete_points(new_centers); } double time_elapsed = (double)(clock() - start) / CLOCKS_PER_SEC; printf("Total Time Elapsed: %lf seconds\n", time_elapsed); FILE *f; //Store Performance metrics //For now just the time elapsed, in the future maybe we'll need memory GPU memory bandwidth etc... f = fopen("log.out", "w"); fprintf(f, "Time Elapsed: %lf ", time_elapsed); fclose(f); // print & save results f = fopen("centers.out", "w"); printf("Centers:\n"); for (i = 0; i < k; i++) { for (j = 0; j < dim; j++){ printf("%lf ", centers[i][j]); fprintf(f, "%lf ", centers[i][j]); } printf("\n"); fprintf(f, "\n"); } fclose(f); //Store Mapping Data in case we need it copy_from_gpu(points_clusters, dev_points_clusters, n*sizeof(int)); f = fopen("point_cluster_map.out", "w"); for (i =0;i<n;i++){ fprintf(f, "%d\n", points_clusters[i]); } fclose(f); // GPU clean gpu_free(dev_centers); gpu_free(dev_points); gpu_free(dev_points_clusters); // clear and exit delete_points(points); delete_points(centers); free(points_clusters); return 0; }
7a830cbac12992deeba383740c887a3684356bee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" __global__ void mul_scalar_float(int n,int idx, float dx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = dy[i] * dx; } }
7a830cbac12992deeba383740c887a3684356bee.cu
#include "includes.h" extern "C" __global__ void mul_scalar_float(int n,int idx, float dx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = dy[i] * dx; } }
73f9bfa56737a5db0bdf93210f1c0666c703b1d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright Ramtin Shams (hereafter referred to as 'the author'). All rights reserved. **Citation required in derived works or publications** NOTICE TO USER: Users and possessors of this source code are hereby granted a nonexclusive, royalty-free license to use this source code for non-commercial purposes only, as long as the author is appropriately acknowledged by inclusion of this notice in derived works and citation of appropriate publication(s) listed at the end of this notice in any derived works or publications that use or have benefited from this source code in its entirety or in part. THE AUTHOR MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. Relevant publication(s): @inproceedings{Shams_ICSPCS_2007, author = "R. Shams and R. A. Kennedy", title = "Efficient Histogram Algorithms for {NVIDIA} {CUDA} Compatible Devices", booktitle = "Proc. Int. Conf. on Signal Processing and Communications Systems ({ICSPCS})", address = "Gold Coast, Australia", month = dec, year = "2007", pages = "418-422", } @inproceedings{Shams_DICTA_2007a, author = "R. Shams and N. Barnes", title = "Speeding up Mutual Information Computation Using {NVIDIA} {CUDA} Hardware", booktitle = "Proc. 
Digital Image Computing: Techniques and Applications ({DICTA})", address = "Adelaide, Australia", month = dec, year = "2007", pages = "555-560", doi = "10.1109/DICTA.2007.4426846", }; */ // includes, system #include <stdlib.h> #include <tchar.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> #include "cuda_basics.h" #include "cuda_hist.h" // includes, kernels #include "gpu_hist.cu" extern "C" double cudaHista(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_hist; double time = 0; unsigned int hTimer; cudaHistOptions options; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, size, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0); } else { d_src = src; //Do not copy hist! 
} if (p_options == NULL) { options.threads = 160; options.blocks = 64; } else options = *p_options; //Perform sanity checks if (options.threads > MAX_THREADS) printf("'threads' exceed the maximum."), exit(1); if (options.threads % WARP_SIZE != 0) printf("'threads' must be a multiple of the WARP_SIZE."), exit(1); if (options.blocks > MAX_BLOCKS_PER_DIM) printf("'blocks' exceed the maximum."), exit(1); //Prepare the execution configuration int warps = options.threads / WARP_SIZE; int max_bins = MAX_USABLE_SHARED / sizeof(unsigned int) / warps; block.x = WARP_SIZE; block.y = warps; block.z = 1; grid.x = options.blocks; grid.y = 1; grid.z = 1; int shared_mem_size = max_bins * warps * sizeof(unsigned int); if (shared_mem_size> MAX_USABLE_SHARED) printf("Maximum shared memory exceeded."), exit(1); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStartTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, options.blocks * bins * sizeof(float))); //Initialize histogram memory CUDA_SAFE_CALL(hipMemset(d_hist, 0, options.blocks * bins * sizeof(float))); TIMER_PRINT("Initializing data", 0); TIMER_START; int calls = iDivUp(bins, max_bins); for (int i = 0; i < calls; i++) { hipLaunchKernelGGL(( gpuHista), dim3(grid), dim3(block), shared_mem_size, 0, d_src, d_hist + max_bins * i, length, bins, min(max_bins, bins - max_bins * i), max_bins * i); CUT_CHECK_ERROR("gpuHista() execution failed\n"); } TIMER_PRINT("gpuHista", length); //Sum up the histograms int numHists = grid.x; if (numHists > 1) { block.x = MAX_THREADS; block.y = 1; block.z = 1; grid.x = ceil((float) bins / block.x); grid.y = 1; grid.z = 1; TIMER_START; hipLaunchKernelGGL(( gpuSumGlobalMem), dim3(grid), dim3(block), 0, 0, d_hist, d_hist, numHists, bins); CUT_CHECK_ERROR("gpuSumGlobalMem() execution failed\n"); TIMER_PRINT("gpuSumGlobalMem", bins * numHists); } CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStopTimer(hTimer)); time = cutGetTimerValue(hTimer); 
CUT_SAFE_CALL(cutDeleteTimer(hTimer)); TIMER_START; if (!device) { CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src)); } else { CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToDevice)); } CUDA_SAFE_CALL(hipFree(d_hist)); TIMER_PRINT("Storing data", 0); TIMER_DELETE; return time; } extern "C" double cudaHistb(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_hist, *d_interim; double time = 0; unsigned int hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, size, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0) } else { d_src = src; d_hist = hist; } cudaHistOptions options; if (p_options) options = *p_options; else { options.threads = 128; options.blocks = 8; } //Prepare execution configuration block.x = options.threads; block.y = 1; block.z = 1; grid.x = options.blocks; grid.y = 1; grid.z = 1; int cell_len = powf(2.0f, ceilf(log2f(block.x)) + ceilf(log2f(grid.x))); int hist_len = cell_len * bins; CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStartTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipMalloc((void**) &d_interim, hist_len * sizeof(float))); TIMER_PRINT("Allocating memory", 0); cudaZeroMem(d_interim, hist_len); //This much faster than hipMemset TIMER_START; int shared_len_pt = GPUHIST_SHARED_LEN >> (int) ceil(log2f(options.threads)); //Length of shared memory available to each thread (in int32) int n = (shared_len_pt << 5) / bins; const int bits_pbin = n > 0 ? 
min((1 << (int)log2f(n)), 32) : 0; //Number of bits per bin per thread 0, 1, 2, 4, 8, 16, 32 #ifdef VERBOSE printf("bits per bin: %d\n", bits_pbin); #endif hipLaunchKernelGGL(( gpuHistb), dim3(grid), dim3(block), 0, 0, d_src, d_interim, length, bins, bits_pbin); CUT_CHECK_ERROR("gpuHistb() execution failed\n"); TIMER_PRINT("gpuHistb", length); //Reduce the interim histogram if (bins > MAX_BLOCKS_PER_DIM) //We want the bins to fit in horizontal grid.x printf("Maximimum bins exceeded."), exit(1); block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1; //gpuReduceHist requires at least 64 threads grid.x = bins; grid.y = 1; grid.z = 1; TIMER_START; hipLaunchKernelGGL(( gpuReduceHist), dim3(grid), dim3(block), block.x * sizeof(float), 0, d_interim, d_hist, cell_len); CUT_CHECK_ERROR("gpuReduceHist() execution failed\n"); TIMER_PRINT("gpuReduceHist", hist_len); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStopTimer(hTimer)); time = cutGetTimerValue(hTimer); CUT_SAFE_CALL(cutDeleteTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipFree(d_interim)); if (!device) { CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipFree(d_hist)); } TIMER_PRINT("Storing data", 0); TIMER_DELETE; return time; } extern "C" double cudaHistc(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_hist, *d_interim; double time = 0; unsigned int hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, size, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0) } else { d_src = src; d_hist = 
hist; } cudaHistOptions options; if (p_options) options = *p_options; else { options.threads = 128; options.blocks = 8; } //Prepare execution configuration block.x = options.threads; block.y = 1; block.z = 1; grid.x = options.blocks; grid.y = 1; grid.z = 1; int cell_len = powf(2.0f, ceilf(log2f(block.x)) + ceilf(log2f(grid.x))); int hist_len = cell_len * bins; CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStartTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipMalloc((void**) &d_interim, hist_len * sizeof(float))); TIMER_PRINT("Allocating memory", 0); cudaZeroMem(d_interim, hist_len); //This much faster than hipMemset TIMER_START; int shared_len_pw = GPUHIST_SHARED_LEN >> (int) ceil(log2f(options.threads >> LOG2_WARP_SIZE)); //Length of shared memory available to each warp (in int32) int bits_pbin = max(min((shared_len_pw * 27) / bins, 27), 0); for (int i = 1; i <= 28; i++) { if (bits_pbin >= 27 / i) { bits_pbin = 27 / i; break; } } #ifdef VERBOSE printf("bits per bin: %d\n", bits_pbin); #endif hipLaunchKernelGGL(( gpuHistc), dim3(grid), dim3(block), 0, 0, d_src, d_interim, length, bins, bits_pbin); CUT_CHECK_ERROR("gpuHistc() execution failed\n"); TIMER_PRINT("gpuHistc", length); //Reduce the interim histogram /*if (cell_len > 1024) printf("Maximimum length exceeded."), exit(1); if (bins > MAX_BLOCKS_PER_DIM) //We want the bins to fit in horizontal grid.x printf("Maximimum bins exceeded."), exit(1); block.x = cell_len >> 1; block.y = 1; block.z = 1; grid.x = bins; grid.y = 1; grid.z = 1; TIMER_START; gpuReduceHist<<<grid, block, cell_len * sizeof(float)>>>(d_interim, d_hist); CUT_CHECK_ERROR("gpuReduceHist() execution failed\n"); TIMER_PRINT("gpuReduceHist", hist_len);*/ if (bins > MAX_BLOCKS_PER_DIM) //We want the bins to fit in horizontal grid.x printf("Maximimum bins exceeded."), exit(1); block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1; //gpuReduce requires at least 64 threads grid.x = bins; grid.y = 1; grid.z = 1; TIMER_START; 
hipLaunchKernelGGL(( gpuReduceHist), dim3(grid), dim3(block), block.x * sizeof(float), 0, d_interim, d_hist, cell_len); CUT_CHECK_ERROR("gpuReduceHist() execution failed\n"); TIMER_PRINT("gpuReduceHist", hist_len); /*if (bins > MAX_BLOCKS_PER_DIM) //We want the bins to fit in horizontal grid.x printf("Maximimum bins exceeded."), exit(1); cudaSumAlongRows(d_interim, d_hist, cell_len, bins, true);*/ CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStopTimer(hTimer)); time = cutGetTimerValue(hTimer); CUT_SAFE_CALL(cutDeleteTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipFree(d_interim)); if (!device) { CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipFree(d_hist)); } TIMER_PRINT("Storing data", 0); TIMER_DELETE; return time; } extern "C" double cudaHist_Approx(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_hist, *d_interim; double time = 0; unsigned int hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, size, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0) } else { d_src = src; d_hist = hist; } cudaHistOptions options; if (p_options) options = *p_options; else { options.threads = 256; options.blocks = 16; } //Prepare execution configuration block.x = options.threads; block.y = 1; block.z = 1; grid.x = options.blocks; grid.y = 1; grid.z = 1; int cell_len = powf(2.0f, ceilf(log2f(grid.x))); int hist_len = cell_len * bins; CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStartTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipMalloc((void**) &d_interim, 
hist_len * sizeof(float))); TIMER_PRINT("Allocating memory", 0); cudaZeroMem(d_interim, hist_len); //This is much faster than hipMemset TIMER_START; int n = (GPUHIST_SHARED_LEN << 5) / bins; const int bits_pbin = n > 0 ? min((1 << (int)log2f(n)), 32) : 0; //Number of bits per bin per thread 0, 1, 2, 4, 8, 16, 32 #ifdef VERBOSE printf("bits per bin: %d\n", bits_pbin); #endif hipLaunchKernelGGL(( gpuHist_Approx), dim3(grid), dim3(block), 0, 0, d_src, d_interim, length, bins, bits_pbin, (float) bins / options.threads); CUT_CHECK_ERROR("gpuHist_Approx() execution failed\n"); TIMER_PRINT("gpuHist_Approx", length); //Reduce the interim histogram if (bins > MAX_BLOCKS_PER_DIM) //We want the bins to fit in horizontal grid.x printf("Maximimum bins exceeded."), exit(1); block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1; //gpuReduceHist requires at least 64 threads grid.x = bins; grid.y = 1; grid.z = 1; TIMER_START; hipLaunchKernelGGL(( gpuReduceHist), dim3(grid), dim3(block), block.x * sizeof(float), 0, d_interim, d_hist, cell_len); CUT_CHECK_ERROR("gpuReduceHist() execution failed\n"); TIMER_PRINT("gpuReduceHist", hist_len); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUT_SAFE_CALL(cutStopTimer(hTimer)); time = cutGetTimerValue(hTimer); CUT_SAFE_CALL(cutDeleteTimer(hTimer)); TIMER_START; CUDA_SAFE_CALL(hipFree(d_interim)); if (!device) { CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipFree(d_hist)); } TIMER_PRINT("Storing data", 0); TIMER_DELETE; return time; } extern "C" void cudaHist2Da(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int bins = xbins * ybins; float *d_src1, *d_src2, *d_hist, *d_src; //Device memory pointers int size = length * sizeof(float); TIMER_CREATE; TIMER_START; if (!device) { //Allocate data on the device 
CUDA_SAFE_CALL(hipMalloc((void**) &d_src1, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_src2, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src1, src1, size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_src2, src2, size, hipMemcpyHostToDevice)); } else { d_src1 = src1; d_src2 = src2; d_hist = hist; } CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); //Buffer to hold the conbined source data that can be passed to cudaHist TIMER_PRINT("Loading data", 0); //Combine src1 and src2 into a single array for processing by cudaHist //Prepare execution configuration const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1; //CUDA throws an excution error if grid.z is not 1 TIMER_START; hipLaunchKernelGGL(( gpuCombineHist2DSrcData), dim3(grid), dim3(block), 0, 0, d_src1, d_src2, d_src, length, xbins, ybins); CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n"); TIMER_PRINT("gpuCombineHist2DSrcData", length); cudaHista(d_src, d_hist, length, bins, p_options, true); //No need to initialize d_hist, will be done by cudaHist if (!device) { TIMER_START; CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src1)); CUDA_SAFE_CALL(hipFree(d_src2)); CUDA_SAFE_CALL(hipFree(d_hist)); TIMER_PRINT("Storing data", 0); } CUDA_SAFE_CALL(hipFree(d_src)); TIMER_DELETE; } extern "C" void cudaHist2Db(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int bins = xbins * ybins; float *d_src1, *d_src2, *d_hist, *d_src; //Device memory pointers int size = length * sizeof(float); TIMER_CREATE; TIMER_START; if (!device) { //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) 
&d_src1, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_src2, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src1, src1, size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_src2, src2, size, hipMemcpyHostToDevice)); } else { d_src1 = src1; d_src2 = src2; d_hist = hist; } CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); //Buffer to hold the conbined source data that can be passed to cudaHist TIMER_PRINT("Loading data", 0); //Combine src1 and src2 into a single array for processing by cudaHist //Prepare execution configuration const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1; //CUDA throws an excution error if grid.z is not 1 TIMER_START; hipLaunchKernelGGL(( gpuCombineHist2DSrcData), dim3(grid), dim3(block), 0, 0, d_src1, d_src2, d_src, length, xbins, ybins); CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n"); TIMER_PRINT("gpuCombineHist2DSrcData", length); cudaHistb(d_src, d_hist, length, bins, p_options, true); //No need to initialize d_hist, will be done by cudaHist if (!device) { TIMER_START; CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src1)); CUDA_SAFE_CALL(hipFree(d_src2)); CUDA_SAFE_CALL(hipFree(d_hist)); TIMER_PRINT("Storing data", 0); } CUDA_SAFE_CALL(hipFree(d_src)); TIMER_DELETE; } extern "C" void cudaHist2D_Approx(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/) { dim3 grid, block; int bins = xbins * ybins; float *d_src1, *d_src2, *d_hist, *d_src; //Device memory pointers int size = length * sizeof(float); TIMER_CREATE; TIMER_START; if (!device) { //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src1, size)); 
CUDA_SAFE_CALL(hipMalloc((void**) &d_src2, size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_hist, bins * sizeof(float))); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src1, src1, size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_src2, src2, size, hipMemcpyHostToDevice)); } else { d_src1 = src1; d_src2 = src2; d_hist = hist; } CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); //Buffer to hold the conbined source data that can be passed to cudaHist TIMER_PRINT("Loading data", 0); //Combine src1 and src2 into a single array for processing by cudaHist //Prepare execution configuration const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1; //CUDA throws an excution error if grid.z is not 1 TIMER_START; hipLaunchKernelGGL(( gpuCombineHist2DSrcData), dim3(grid), dim3(block), 0, 0, d_src1, d_src2, d_src, length, xbins, ybins); CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n"); TIMER_PRINT("gpuCombineHist2DSrcData", length); cudaHist_Approx(d_src, d_hist, length, bins, p_options, true); //No need to initialize d_hist, will be done by cudaHist if (!device) { TIMER_START; CUDA_SAFE_CALL(hipMemcpy(hist, d_hist, bins * sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_src1)); CUDA_SAFE_CALL(hipFree(d_src2)); CUDA_SAFE_CALL(hipFree(d_hist)); TIMER_PRINT("Storing data", 0); } CUDA_SAFE_CALL(hipFree(d_src)); TIMER_DELETE; }
73f9bfa56737a5db0bdf93210f1c0666c703b1d5.cu
/*
Copyright Ramtin Shams (hereafter referred to as 'the author'). All rights reserved.
**Citation required in derived works or publications**

NOTICE TO USER:
Users and possessors of this source code are hereby granted a nonexclusive,
royalty-free license to use this source code for non-commercial purposes only,
as long as the author is appropriately acknowledged by inclusion of this notice
in derived works and citation of appropriate publication(s) listed at the end
of this notice in any derived works or publications that use or have benefited
from this source code in its entirety or in part.

THE AUTHOR MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE CODE
FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF
ANY KIND. THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND
FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.

Relevant publication(s):
	@inproceedings{Shams_ICSPCS_2007,
		author = "R. Shams and R. A. Kennedy",
		title = "Efficient Histogram Algorithms for {NVIDIA} {CUDA} Compatible Devices",
		booktitle = "Proc. Int. Conf. on Signal Processing and Communications Systems ({ICSPCS})",
		address = "Gold Coast, Australia",
		month = dec,
		year = "2007",
		pages = "418-422",
	}

	@inproceedings{Shams_DICTA_2007a,
		author = "R. Shams and N. Barnes",
		title = "Speeding up Mutual Information Computation Using {NVIDIA} {CUDA} Hardware",
		booktitle = "Proc. Digital Image Computing: Techniques and Applications ({DICTA})",
		address = "Adelaide, Australia",
		month = dec,
		year = "2007",
		pages = "555-560",
		doi = "10.1109/DICTA.2007.4426846",
	};
*/

// includes, system
#include <stdlib.h>
#include <tchar.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
#include "cuda_basics.h"
#include "cuda_hist.h"

// includes, kernels
#include "gpu_hist.cu"

// Histogram method (a): gpuHista accumulates per-block partial histograms in
// global memory (one sub-histogram per block); when 'bins' does not fit in
// shared memory the bin range is processed in windows of max_bins per launch,
// and the per-block partials are summed by gpuSumGlobalMem.
// src:       input samples (host pointer, or device pointer when 'device')
// hist:      output histogram of 'bins' float counters
// length:    number of input samples
// bins:      number of histogram bins
// p_options: optional thread/block configuration (defaults: 160 threads, 64 blocks)
// device:    when true, 'src' and 'hist' are already device pointers
// Returns the GPU processing time in ms (excludes host<->device staging).
extern "C" double cudaHista(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int size = length * sizeof(float);
	//Device memory pointers
	float *d_src, *d_hist;
	double time = 0;
	unsigned int hTimer;
	cudaHistOptions options;

	CUT_SAFE_CALL(cutCreateTimer(&hTimer));
	TIMER_CREATE;

	if (!device)
	{
		TIMER_START;
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice));
		TIMER_PRINT("Loading data", 0);
	}
	else
	{
		d_src = src;
		//Do not copy hist!
	}

	if (p_options == NULL)
	{
		options.threads = 160;
		options.blocks = 64;
	}
	else
		options = *p_options;

	//Perform sanity checks
	if (options.threads > MAX_THREADS)
		printf("'threads' exceed the maximum."), exit(1);
	if (options.threads % WARP_SIZE != 0)
		printf("'threads' must be a multiple of the WARP_SIZE."), exit(1);
	if (options.blocks > MAX_BLOCKS_PER_DIM)
		printf("'blocks' exceed the maximum."), exit(1);

	//Prepare the execution configuration
	int warps = options.threads / WARP_SIZE;
	// max_bins: how many bins fit per warp in the usable shared memory.
	int max_bins = MAX_USABLE_SHARED / sizeof(unsigned int) / warps;
	block.x = WARP_SIZE; block.y = warps; block.z = 1;
	grid.x = options.blocks; grid.y = 1; grid.z = 1;
	int shared_mem_size = max_bins * warps * sizeof(unsigned int);
	if (shared_mem_size > MAX_USABLE_SHARED)
		printf("Maximum shared memory exceeded."), exit(1);

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStartTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, options.blocks * bins * sizeof(float)));
	//Initialize histogram memory
	CUDA_SAFE_CALL(cudaMemset(d_hist, 0, options.blocks * bins * sizeof(float)));
	TIMER_PRINT("Initializing data", 0);

	TIMER_START;
	// Process the bin range in windows of max_bins bins per kernel call.
	int calls = iDivUp(bins, max_bins);
	for (int i = 0; i < calls; i++)
	{
		gpuHista<<<grid, block, shared_mem_size>>>(d_src, d_hist + max_bins * i, length, bins, min(max_bins, bins - max_bins * i), max_bins * i);
		CUT_CHECK_ERROR("gpuHista() execution failed\n");
	}
	TIMER_PRINT("gpuHista", length);

	//Sum up the histograms
	int numHists = grid.x;
	if (numHists > 1)
	{
		block.x = MAX_THREADS; block.y = 1; block.z = 1;
		grid.x = ceil((float) bins / block.x); grid.y = 1; grid.z = 1;
		TIMER_START;
		gpuSumGlobalMem<<<grid, block>>>(d_hist, d_hist, numHists, bins);
		CUT_CHECK_ERROR("gpuSumGlobalMem() execution failed\n");
		TIMER_PRINT("gpuSumGlobalMem", bins * numHists);
	}

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	time = cutGetTimerValue(hTimer);
	CUT_SAFE_CALL(cutDeleteTimer(hTimer));

	TIMER_START;
	if (!device)
	{
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src));
	}
	else
	{
		// Caller's 'hist' is a device pointer; results are copied device-to-device.
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToDevice));
	}
	CUDA_SAFE_CALL(cudaFree(d_hist));
	TIMER_PRINT("Storing data", 0);
	TIMER_DELETE;

	return time;
}

// Histogram method (b): gpuHistb packs per-thread bin counters into shared
// memory at bits_pbin bits per bin; the interim (cell_len x bins) partial
// histogram is then reduced to the final histogram by gpuReduceHist.
// Parameters and return as for cudaHista (defaults: 128 threads, 8 blocks).
extern "C" double cudaHistb(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int size = length * sizeof(float);
	//Device memory pointers
	float *d_src, *d_hist, *d_interim;
	double time = 0;
	unsigned int hTimer;

	CUT_SAFE_CALL(cutCreateTimer(&hTimer));
	TIMER_CREATE;

	if (!device)
	{
		TIMER_START;
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice));
		TIMER_PRINT("Loading data", 0)
	}
	else
	{
		d_src = src;
		d_hist = hist;
	}

	cudaHistOptions options;
	if (p_options)
		options = *p_options;
	else
	{
		options.threads = 128;
		options.blocks = 8;
	}

	//Prepare execution configuration
	block.x = options.threads; block.y = 1; block.z = 1;
	grid.x = options.blocks; grid.y = 1; grid.z = 1;
	// cell_len: total thread count rounded up to a power of two; the interim
	// buffer holds one partial count per (bin, thread) cell.
	int cell_len = powf(2.0f, ceilf(log2f(block.x)) + ceilf(log2f(grid.x)));
	int hist_len = cell_len * bins;

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStartTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_interim, hist_len * sizeof(float)));
	TIMER_PRINT("Allocating memory", 0);
	cudaZeroMem(d_interim, hist_len);	//This much faster than cudaMemset

	TIMER_START;
	int shared_len_pt = GPUHIST_SHARED_LEN >> (int) ceil(log2f(options.threads));	//Length of shared memory available to each thread (in int32)
	int n = (shared_len_pt << 5) / bins;
	const int bits_pbin = n > 0 ? min((1 << (int)log2f(n)), 32) : 0;	//Number of bits per bin per thread 0, 1, 2, 4, 8, 16, 32
#ifdef VERBOSE
	printf("bits per bin: %d\n", bits_pbin);
#endif
	gpuHistb<<<grid, block>>>(d_src, d_interim, length, bins, bits_pbin);
	CUT_CHECK_ERROR("gpuHistb() execution failed\n");
	TIMER_PRINT("gpuHistb", length);

	//Reduce the interim histogram
	if (bins > MAX_BLOCKS_PER_DIM)	//We want the bins to fit in horizontal grid.x
		printf("Maximimum bins exceeded."), exit(1);
	block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1;	//gpuReduceHist requires at least 64 threads
	grid.x = bins; grid.y = 1; grid.z = 1;
	TIMER_START;
	gpuReduceHist<<<grid, block, block.x * sizeof(float)>>>(d_interim, d_hist, cell_len);
	CUT_CHECK_ERROR("gpuReduceHist() execution failed\n");
	TIMER_PRINT("gpuReduceHist", hist_len);

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	time = cutGetTimerValue(hTimer);
	CUT_SAFE_CALL(cutDeleteTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaFree(d_interim));
	if (!device)
	{
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src));
		CUDA_SAFE_CALL(cudaFree(d_hist));
	}
	TIMER_PRINT("Storing data", 0);
	TIMER_DELETE;

	return time;
}

// Histogram method (c): like (b) but gpuHistc packs counters per warp rather
// than per thread, with bits_pbin chosen as a divisor of a 27-bit budget.
// Parameters and return as for cudaHista (defaults: 128 threads, 8 blocks).
extern "C" double cudaHistc(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int size = length * sizeof(float);
	//Device memory pointers
	float *d_src, *d_hist, *d_interim;
	double time = 0;
	unsigned int hTimer;

	CUT_SAFE_CALL(cutCreateTimer(&hTimer));
	TIMER_CREATE;

	if (!device)
	{
		TIMER_START;
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice));
		TIMER_PRINT("Loading data", 0)
	}
	else
	{
		d_src = src;
		d_hist = hist;
	}

	cudaHistOptions options;
	if (p_options)
		options = *p_options;
	else
	{
		options.threads = 128;
		options.blocks = 8;
	}

	//Prepare execution configuration
	block.x = options.threads; block.y = 1; block.z = 1;
	grid.x = options.blocks; grid.y = 1; grid.z = 1;
	// cell_len: total thread count rounded up to a power of two.
	int cell_len = powf(2.0f, ceilf(log2f(block.x)) + ceilf(log2f(grid.x)));
	int hist_len = cell_len * bins;

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStartTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_interim, hist_len * sizeof(float)));
	TIMER_PRINT("Allocating memory", 0);
	cudaZeroMem(d_interim, hist_len);	//This much faster than cudaMemset

	TIMER_START;
	int shared_len_pw = GPUHIST_SHARED_LEN >> (int) ceil(log2f(options.threads >> LOG2_WARP_SIZE));	//Length of shared memory available to each warp (in int32)
	int bits_pbin = max(min((shared_len_pw * 27) / bins, 27), 0);
	// Snap bits_pbin down to the nearest value of the form 27/i so bins pack
	// evenly into 27-bit counter fields.
	for (int i = 1; i <= 28; i++)
	{
		if (bits_pbin >= 27 / i)
		{
			bits_pbin = 27 / i;
			break;
		}
	}
#ifdef VERBOSE
	printf("bits per bin: %d\n", bits_pbin);
#endif
	gpuHistc<<<grid, block>>>(d_src, d_interim, length, bins, bits_pbin);
	CUT_CHECK_ERROR("gpuHistc() execution failed\n");
	TIMER_PRINT("gpuHistc", length);

	//Reduce the interim histogram
	/*if (cell_len > 1024)
		printf("Maximimum length exceeded."), exit(1);
	if (bins > MAX_BLOCKS_PER_DIM)	//We want the bins to fit in horizontal grid.x
		printf("Maximimum bins exceeded."), exit(1);
	block.x = cell_len >> 1; block.y = 1; block.z = 1;
	grid.x = bins; grid.y = 1; grid.z = 1;
	TIMER_START;
	gpuReduceHist<<<grid, block, cell_len * sizeof(float)>>>(d_interim, d_hist);
	CUT_CHECK_ERROR("gpuReduceHist() execution failed\n");
	TIMER_PRINT("gpuReduceHist", hist_len);*/
	if (bins > MAX_BLOCKS_PER_DIM)	//We want the bins to fit in horizontal grid.x
		printf("Maximimum bins exceeded."), exit(1);
	block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1;	//gpuReduce requires at least 64 threads
	grid.x = bins; grid.y = 1; grid.z = 1;
	TIMER_START;
	gpuReduceHist<<<grid, block, block.x * sizeof(float)>>>(d_interim, d_hist, cell_len);
	CUT_CHECK_ERROR("gpuReduceHist() execution failed\n");
	TIMER_PRINT("gpuReduceHist", hist_len);
	/*if (bins > MAX_BLOCKS_PER_DIM)	//We want the bins to fit in horizontal grid.x
		printf("Maximimum bins exceeded."), exit(1);
	cudaSumAlongRows(d_interim, d_hist, cell_len, bins, true);*/

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	time = cutGetTimerValue(hTimer);
	CUT_SAFE_CALL(cutDeleteTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaFree(d_interim));
	if (!device)
	{
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src));
		CUDA_SAFE_CALL(cudaFree(d_hist));
	}
	TIMER_PRINT("Storing data", 0);
	TIMER_DELETE;

	return time;
}

// Approximate histogram: gpuHist_Approx trades exactness for speed; the
// interim buffer holds one partial histogram per block (cell_len rounds the
// block count up to a power of two), reduced by gpuReduceHist.
// Parameters and return as for cudaHista (defaults: 256 threads, 16 blocks).
extern "C" double cudaHist_Approx(float *src, float *hist, int length, int bins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int size = length * sizeof(float);
	//Device memory pointers
	float *d_src, *d_hist, *d_interim;
	double time = 0;
	unsigned int hTimer;

	CUT_SAFE_CALL(cutCreateTimer(&hTimer));
	TIMER_CREATE;

	if (!device)
	{
		TIMER_START;
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice));
		TIMER_PRINT("Loading data", 0)
	}
	else
	{
		d_src = src;
		d_hist = hist;
	}

	cudaHistOptions options;
	if (p_options)
		options = *p_options;
	else
	{
		options.threads = 256;
		options.blocks = 16;
	}

	//Prepare execution configuration
	block.x = options.threads; block.y = 1; block.z = 1;
	grid.x = options.blocks; grid.y = 1; grid.z = 1;
	// Note: unlike methods (b)/(c), only the block count enters cell_len here.
	int cell_len = powf(2.0f, ceilf(log2f(grid.x)));
	int hist_len = cell_len * bins;

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStartTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_interim, hist_len * sizeof(float)));
	TIMER_PRINT("Allocating memory", 0);
	cudaZeroMem(d_interim, hist_len);	//This is much faster than cudaMemset

	TIMER_START;
	int n = (GPUHIST_SHARED_LEN << 5) / bins;
	const int bits_pbin = n > 0 ? min((1 << (int)log2f(n)), 32) : 0;	//Number of bits per bin per thread 0, 1, 2, 4, 8, 16, 32
#ifdef VERBOSE
	printf("bits per bin: %d\n", bits_pbin);
#endif
	gpuHist_Approx<<<grid, block>>>(d_src, d_interim, length, bins, bits_pbin, (float) bins / options.threads);
	CUT_CHECK_ERROR("gpuHist_Approx() execution failed\n");
	TIMER_PRINT("gpuHist_Approx", length);

	//Reduce the interim histogram
	if (bins > MAX_BLOCKS_PER_DIM)	//We want the bins to fit in horizontal grid.x
		printf("Maximimum bins exceeded."), exit(1);
	block.x = max(min(cell_len, MAX_THREADS), 64); block.y = 1; block.z = 1;	//gpuReduceHist requires at least 64 threads
	grid.x = bins; grid.y = 1; grid.z = 1;
	TIMER_START;
	gpuReduceHist<<<grid, block, block.x * sizeof(float)>>>(d_interim, d_hist, cell_len);
	CUT_CHECK_ERROR("gpuReduceHist() execution failed\n");
	TIMER_PRINT("gpuReduceHist", hist_len);

	CUDA_SAFE_CALL(cudaThreadSynchronize());
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	time = cutGetTimerValue(hTimer);
	CUT_SAFE_CALL(cutDeleteTimer(hTimer));

	TIMER_START;
	CUDA_SAFE_CALL(cudaFree(d_interim));
	if (!device)
	{
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src));
		CUDA_SAFE_CALL(cudaFree(d_hist));
	}
	TIMER_PRINT("Storing data", 0);
	TIMER_DELETE;

	return time;
}

// 2D histogram via method (a): sample pairs (src1[i], src2[i]) are fused into
// a single combined stream on the GPU (gpuCombineHist2DSrcData) and handed to
// cudaHista. hist receives xbins*ybins float counters.
extern "C" void cudaHist2Da(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int bins = xbins * ybins;
	float *d_src1, *d_src2, *d_hist, *d_src;	//Device memory pointers
	int size = length * sizeof(float);

	TIMER_CREATE;
	TIMER_START;
	if (!device)
	{
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src1, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src2, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src1, src1, size, cudaMemcpyHostToDevice));
		CUDA_SAFE_CALL(cudaMemcpy(d_src2, src2, size, cudaMemcpyHostToDevice));
	}
	else
	{
		d_src1 = src1;
		d_src2 = src2;
		d_hist = hist;
	}
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));	//Buffer to hold the combined source data that can be passed to cudaHist
	TIMER_PRINT("Loading data", 0);

	//Combine src1 and src2 into a single array for processing by cudaHist
	//Prepare execution configuration
	const int max_threads = MAX_THREADS;
	int good_len = iRoundUp(length, WARP_SIZE);
	block.x = max_threads; block.y = 1; block.z = 1;
	grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1;	//CUDA throws an execution error if grid.z is not 1
	TIMER_START;
	gpuCombineHist2DSrcData<<<grid, block>>>(d_src1, d_src2, d_src, length, xbins, ybins);
	CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n");
	TIMER_PRINT("gpuCombineHist2DSrcData", length);

	cudaHista(d_src, d_hist, length, bins, p_options, true);	//No need to initialize d_hist, will be done by cudaHist

	if (!device)
	{
		TIMER_START;
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src1));
		CUDA_SAFE_CALL(cudaFree(d_src2));
		CUDA_SAFE_CALL(cudaFree(d_hist));
		TIMER_PRINT("Storing data", 0);
	}
	CUDA_SAFE_CALL(cudaFree(d_src));
	TIMER_DELETE;
}

// 2D histogram via method (b); identical staging to cudaHist2Da but the
// combined stream is binned by cudaHistb.
extern "C" void cudaHist2Db(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int bins = xbins * ybins;
	float *d_src1, *d_src2, *d_hist, *d_src;	//Device memory pointers
	int size = length * sizeof(float);

	TIMER_CREATE;
	TIMER_START;
	if (!device)
	{
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src1, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src2, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src1, src1, size, cudaMemcpyHostToDevice));
		CUDA_SAFE_CALL(cudaMemcpy(d_src2, src2, size, cudaMemcpyHostToDevice));
	}
	else
	{
		d_src1 = src1;
		d_src2 = src2;
		d_hist = hist;
	}
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));	//Buffer to hold the combined source data that can be passed to cudaHist
	TIMER_PRINT("Loading data", 0);

	//Combine src1 and src2 into a single array for processing by cudaHist
	//Prepare execution configuration
	const int max_threads = MAX_THREADS;
	int good_len = iRoundUp(length, WARP_SIZE);
	block.x = max_threads; block.y = 1; block.z = 1;
	grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1;	//CUDA throws an execution error if grid.z is not 1
	TIMER_START;
	gpuCombineHist2DSrcData<<<grid, block>>>(d_src1, d_src2, d_src, length, xbins, ybins);
	CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n");
	TIMER_PRINT("gpuCombineHist2DSrcData", length);

	cudaHistb(d_src, d_hist, length, bins, p_options, true);	//No need to initialize d_hist, will be done by cudaHist

	if (!device)
	{
		TIMER_START;
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src1));
		CUDA_SAFE_CALL(cudaFree(d_src2));
		CUDA_SAFE_CALL(cudaFree(d_hist));
		TIMER_PRINT("Storing data", 0);
	}
	CUDA_SAFE_CALL(cudaFree(d_src));
	TIMER_DELETE;
}

// 2D approximate histogram; identical staging to cudaHist2Da but the combined
// stream is binned by cudaHist_Approx.
extern "C" void cudaHist2D_Approx(float *src1, float *src2, float *hist, int length, int xbins, int ybins, cudaHistOptions *p_options /*= NULL*/, bool device /*= false*/)
{
	dim3 grid, block;
	int bins = xbins * ybins;
	float *d_src1, *d_src2, *d_hist, *d_src;	//Device memory pointers
	int size = length * sizeof(float);

	TIMER_CREATE;
	TIMER_START;
	if (!device)
	{
		//Allocate data on the device
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src1, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_src2, size));
		CUDA_SAFE_CALL(cudaMalloc((void**) &d_hist, bins * sizeof(float)));
		//Copy src data to device memory
		CUDA_SAFE_CALL(cudaMemcpy(d_src1, src1, size, cudaMemcpyHostToDevice));
		CUDA_SAFE_CALL(cudaMemcpy(d_src2, src2, size, cudaMemcpyHostToDevice));
	}
	else
	{
		d_src1 = src1;
		d_src2 = src2;
		d_hist = hist;
	}
	CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size));	//Buffer to hold the combined source data that can be passed to cudaHist
	TIMER_PRINT("Loading data", 0);

	//Combine src1 and src2 into a single array for processing by cudaHist
	//Prepare execution configuration
	const int max_threads = MAX_THREADS;
	int good_len = iRoundUp(length, WARP_SIZE);
	block.x = max_threads; block.y = 1; block.z = 1;
	grid.x = ceil(sqrtf(iDivUp(good_len, max_threads))); grid.y = grid.x; grid.z = 1;	//CUDA throws an execution error if grid.z is not 1
	TIMER_START;
	gpuCombineHist2DSrcData<<<grid, block>>>(d_src1, d_src2, d_src, length, xbins, ybins);
	CUT_CHECK_ERROR("gpuCombineHist2DSrcData() execution failed\n");
	TIMER_PRINT("gpuCombineHist2DSrcData", length);

	cudaHist_Approx(d_src, d_hist, length, bins, p_options, true);	//No need to initialize d_hist, will be done by cudaHist

	if (!device)
	{
		TIMER_START;
		CUDA_SAFE_CALL(cudaMemcpy(hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost));
		CUDA_SAFE_CALL(cudaFree(d_src1));
		CUDA_SAFE_CALL(cudaFree(d_src2));
		CUDA_SAFE_CALL(cudaFree(d_hist));
		TIMER_PRINT("Storing data", 0);
	}
	CUDA_SAFE_CALL(cudaFree(d_src));
	TIMER_DELETE;
}
7749845b5f7d951deec1ad87d8fda54476838752.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//

#include <optix.h>
#include "optixTriangle.h"
#include <cuda/helpers.h>

#include <sutil/vec_math.h>

// Launch parameters, written by the host before each optixLaunch.
extern "C" {
__constant__ Params params;
}

// Store an RGB color into the three 32-bit payload registers of the current
// ray (each float is bit-cast to an unsigned int).
static __forceinline__ __device__ void setPayload( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}

// Compute the primary camera ray for launch index `idx` of a `dim`-sized
// launch, using the camera basis (cam_u, cam_v, cam_w) and eye from `params`.
static __forceinline__ __device__ void computeRay( uint3 idx, uint3 dim, float3& origin, float3& direction )
{
    const float3 U = params.cam_u;
    const float3 V = params.cam_v;
    const float3 W = params.cam_w;
    // Map the pixel index into [-1, 1) normalized device coordinates.
    const float2 d = 2.0f * make_float2(
            static_cast<float>( idx.x ) / static_cast<float>( dim.x ),
            static_cast<float>( idx.y ) / static_cast<float>( dim.y )
            ) - 1.0f;

    origin    = params.cam_eye;
    direction = normalize( d.x * U + d.y * V + W );
}

extern "C" __global__ void __raygen__rg()
{
    // Lookup our location within the launch grid
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();

    // Map our launch idx to a screen location and create a ray from the camera
    // location through the screen
    float3 ray_origin, ray_direction;
    computeRay( idx, dim, ray_origin, ray_direction );

    // Trace the ray against our scene hierarchy; the hit/miss programs return
    // the pixel color through payload registers p0..p2.
    unsigned int p0, p1, p2;
    optixTrace(
            params.handle,
            ray_origin,
            ray_direction,
            0.0f,                       // Min intersection distance
            1e16f,                      // Max intersection distance
            0.0f,                       // rayTime -- used for motion blur
            OptixVisibilityMask( 255 ), // Specify always visible
            OPTIX_RAY_FLAG_NONE,
            0,                          // SBT offset   -- See SBT discussion
            1,                          // SBT stride   -- See SBT discussion
            0,                          // missSBTIndex -- See SBT discussion
            p0, p1, p2 );
    float3 result;
    result.x = int_as_float( p0 );
    result.y = int_as_float( p1 );
    result.z = int_as_float( p2 );

    // Record results in our output raster
    params.image[idx.y * params.image_width + idx.x] = make_color( result );
}

// Miss program: shade with the constant background color stored in the SBT
// record for this miss entry.
extern "C" __global__ void __miss__ms()
{
    MissData* miss_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    setPayload( miss_data->bg_color );
}

// Closest-hit program: visualize the hit point's barycentric coordinates.
extern "C" __global__ void __closesthit__ch()
{
    // When built-in triangle intersection is used, a number of fundamental
    // attributes are provided by the OptiX API, including barycentric coordinates.
    const float2 barycentrics = optixGetTriangleBarycentrics();

    setPayload( make_float3( barycentrics, 1.0f ) );
}
7749845b5f7d951deec1ad87d8fda54476838752.cu
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//

#include <optix.h>
#include "optixTriangle.h"
#include <cuda/helpers.h>

#include <sutil/vec_math.h>

// Per-launch parameters, uploaded by the host before optixLaunch.
extern "C" {
__constant__ Params params;
}

// Pack an RGB color into the current ray's three 32-bit payload registers,
// bit-casting each float component.
static __forceinline__ __device__ void setPayload( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}

// Build the primary camera ray for pixel `idx` of a `dim.x` x `dim.y` launch
// from the pinhole camera (eye + u/v/w basis) stored in `params`.
static __forceinline__ __device__ void computeRay( uint3 idx, uint3 dim, float3& origin, float3& direction )
{
    // Pixel index mapped to normalized device coordinates in [-1, 1).
    const float2 ndc = make_float2(
            2.0f * ( static_cast<float>( idx.x ) / static_cast<float>( dim.x ) ) - 1.0f,
            2.0f * ( static_cast<float>( idx.y ) / static_cast<float>( dim.y ) ) - 1.0f );

    origin    = params.cam_eye;
    direction = normalize( ndc.x * params.cam_u + ndc.y * params.cam_v + params.cam_w );
}

// Ray-generation program: one thread per pixel; traces a primary ray and
// writes the shaded color to params.image.
extern "C" __global__ void __raygen__rg()
{
    // Pixel coordinate and overall launch size.
    const uint3 pixel = optixGetLaunchIndex();
    const uint3 size  = optixGetLaunchDimensions();

    // Primary ray through this pixel.
    float3 origin, direction;
    computeRay( pixel, size, origin, direction );

    // Trace against the scene; the closest-hit/miss programs hand the color
    // back through payload registers p0..p2.
    unsigned int p0, p1, p2;
    optixTrace(
            params.handle,
            origin,
            direction,
            0.0f,                       // minimum intersection distance
            1e16f,                      // maximum intersection distance
            0.0f,                       // rayTime (motion blur unused here)
            OptixVisibilityMask( 255 ), // always visible
            OPTIX_RAY_FLAG_NONE,
            0,                          // SBT offset
            1,                          // SBT stride
            0,                          // miss SBT index
            p0, p1, p2 );

    const float3 color = make_float3( int_as_float( p0 ), int_as_float( p1 ), int_as_float( p2 ) );

    // Store the result in the output raster.
    params.image[pixel.y * params.image_width + pixel.x] = make_color( color );
}

// Miss program: return the constant background color from this entry's SBT record.
extern "C" __global__ void __miss__ms()
{
    MissData* data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    setPayload( data->bg_color );
}

// Closest-hit program: visualize the built-in triangle intersection's
// barycentric coordinates as a color.
extern "C" __global__ void __closesthit__ch()
{
    const float2 bc = optixGetTriangleBarycentrics();
    setPayload( make_float3( bc, 1.0f ) );
}
ea90d6a4b517512b4424921b68b4e0c576b08d42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define N 2000000 #define BLOCK_SIZE 1024 //using namespace std; __global__ void ReduceMin(int n, int *input, int *output){ __shared__ int sh[BLOCK_SIZE]; int tid = threadIdx.x; int myId = threadIdx.x + blockIdx.x*blockDim.x; if(tid<BLOCK_SIZE) sh[tid] = input[myId]; else sh[tid] = INT_MAX; __syncthreads(); for(int i = blockDim.x/2; i>0;i>>=1){ if(tid<i) { if(sh[tid]>sh[tid+i]) // sh[tid]<sh[tid +i] for max sh[tid] = atomicMin(&sh[tid+i], sh[tid]); //atomicMax for max else sh[tid] = sh[tid]; } __syncthreads(); } if(tid==0) output[blockIdx.x] = sh[0]; } int main(){ int num_blocks; if(N%BLOCK_SIZE!=0) num_blocks = N/BLOCK_SIZE+1; else if(N/BLOCK_SIZE==0) num_blocks =1; else num_blocks= N/BLOCK_SIZE; int *h = (int*)malloc(sizeof(int)*N); int *d_h, *d_temp; int *h_temp = (int *) malloc(sizeof(int)*1); hipMalloc((void **)&d_h, sizeof(int)*N); hipMalloc((void **)&d_temp, sizeof(int)*num_blocks); for(int i =0;i<N;i++) h[i] = i+1; hipMemcpy(d_h, h, sizeof(int)*N, hipMemcpyHostToDevice);hipLaunchKernelGGL(( ReduceMin), dim3(num_blocks),dim3(BLOCK_SIZE), 0, 0, BLOCK_SIZE, d_h, d_temp); hipMemcpy(h, d_temp, sizeof(int)*num_blocks, hipMemcpyDeviceToHost); int maxx = INT_MAX; //INT_MIN for max for(int i =0;i<num_blocks;i++){ if(h[i]<maxx &&h[i]!=0) //h[i]>maxx for max maxx = h[i]; } printf("%d", maxx); hipFree(d_h); hipFree(d_temp); }
ea90d6a4b517512b4424921b68b4e0c576b08d42.cu
#include<stdio.h> #define N 2000000 #define BLOCK_SIZE 1024 //using namespace std; __global__ void ReduceMin(int n, int *input, int *output){ __shared__ int sh[BLOCK_SIZE]; int tid = threadIdx.x; int myId = threadIdx.x + blockIdx.x*blockDim.x; if(tid<BLOCK_SIZE) sh[tid] = input[myId]; else sh[tid] = INT_MAX; __syncthreads(); for(int i = blockDim.x/2; i>0;i>>=1){ if(tid<i) { if(sh[tid]>sh[tid+i]) // sh[tid]<sh[tid +i] for max sh[tid] = atomicMin(&sh[tid+i], sh[tid]); //atomicMax for max else sh[tid] = sh[tid]; } __syncthreads(); } if(tid==0) output[blockIdx.x] = sh[0]; } int main(){ int num_blocks; if(N%BLOCK_SIZE!=0) num_blocks = N/BLOCK_SIZE+1; else if(N/BLOCK_SIZE==0) num_blocks =1; else num_blocks= N/BLOCK_SIZE; int *h = (int*)malloc(sizeof(int)*N); int *d_h, *d_temp; int *h_temp = (int *) malloc(sizeof(int)*1); cudaMalloc((void **)&d_h, sizeof(int)*N); cudaMalloc((void **)&d_temp, sizeof(int)*num_blocks); for(int i =0;i<N;i++) h[i] = i+1; cudaMemcpy(d_h, h, sizeof(int)*N, cudaMemcpyHostToDevice); ReduceMin<<<num_blocks,BLOCK_SIZE>>>(BLOCK_SIZE, d_h, d_temp); cudaMemcpy(h, d_temp, sizeof(int)*num_blocks, cudaMemcpyDeviceToHost); int maxx = INT_MAX; //INT_MIN for max for(int i =0;i<num_blocks;i++){ if(h[i]<maxx &&h[i]!=0) //h[i]>maxx for max maxx = h[i]; } printf("%d", maxx); cudaFree(d_h); cudaFree(d_temp); }
cac124ac475fc0c26cf30896b4bfd6f908da230f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, Carnegie Mellon University * See LICENSE for details */ /*************************************************************************** * SPL Matrix * * * * Computes matrix that corresponds to SPL generated routine * ***************************************************************************/ #include <limits.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <hipfft.h> #include <hipfftXt.h> #include <helper_cuda.h> #ifndef ROWS #error ROWS must be defined #endif #ifndef COLUMNS #error COLUMNS must be defined #endif hipfftDoubleReal *Input, *Output; hipfftDoubleReal *dev_in, *dev_out; void initialize(int argc, char **argv) { hipHostMalloc ( &Input, sizeof(hipfftDoubleReal) * COLUMNS ); hipHostMalloc ( &Output, sizeof(hipfftDoubleReal) * ROWS ); hipMalloc ( &dev_in, sizeof(hipfftDoubleReal) * COLUMNS ); hipMalloc ( &dev_out, sizeof(hipfftDoubleReal) * ROWS ); INITFUNC(); } void finalize() { hipHostFree (Output); hipHostFree (Input); hipFree (dev_out); hipFree (dev_in); } void set_value_in_vector(hipfftDoubleReal *arr, int elem) { // Zero array and put '1' in the location indicated by element int idx; for (idx = 0; idx < COLUMNS; idx++) arr[idx] = (idx == elem) ? 1.0 : 0.0; return; } void compute_matrix() { int x, y; printf("[ "); for (x = 0; x < COLUMNS; x++) { set_value_in_vector(Input, x); hipMemcpy ( dev_in, Input, sizeof(hipfftDoubleReal) * COLUMNS, hipMemcpyHostToDevice); FUNC(dev_out, dev_in); hipMemcpy ( Output, dev_out, sizeof(hipfftDoubleReal) * ROWS, hipMemcpyDeviceToHost); if (x != 0) { printf(",\n [ "); } else { printf("[ "); } for (y = 0; y < ROWS; y++) { if (y != 0) { printf(", "); } printf("FloatString(\"%.18g\")", Output[y]); } printf(" ]"); } printf("\n];\n"); } int main(int argc, char** argv) { initialize(argc, argv); compute_matrix(); finalize(); return EXIT_SUCCESS; }
cac124ac475fc0c26cf30896b4bfd6f908da230f.cu
/* * Copyright (c) 2018-2020, Carnegie Mellon University * See LICENSE for details */ /*************************************************************************** * SPL Matrix * * * * Computes matrix that corresponds to SPL generated routine * ***************************************************************************/ #include <limits.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <cufft.h> #include <cufftXt.h> #include <helper_cuda.h> #ifndef ROWS #error ROWS must be defined #endif #ifndef COLUMNS #error COLUMNS must be defined #endif cufftDoubleReal *Input, *Output; cufftDoubleReal *dev_in, *dev_out; void initialize(int argc, char **argv) { cudaMallocHost ( &Input, sizeof(cufftDoubleReal) * COLUMNS ); cudaMallocHost ( &Output, sizeof(cufftDoubleReal) * ROWS ); cudaMalloc ( &dev_in, sizeof(cufftDoubleReal) * COLUMNS ); cudaMalloc ( &dev_out, sizeof(cufftDoubleReal) * ROWS ); INITFUNC(); } void finalize() { cudaFreeHost (Output); cudaFreeHost (Input); cudaFree (dev_out); cudaFree (dev_in); } void set_value_in_vector(cufftDoubleReal *arr, int elem) { // Zero array and put '1' in the location indicated by element int idx; for (idx = 0; idx < COLUMNS; idx++) arr[idx] = (idx == elem) ? 1.0 : 0.0; return; } void compute_matrix() { int x, y; printf("[ "); for (x = 0; x < COLUMNS; x++) { set_value_in_vector(Input, x); cudaMemcpy ( dev_in, Input, sizeof(cufftDoubleReal) * COLUMNS, cudaMemcpyHostToDevice); FUNC(dev_out, dev_in); cudaMemcpy ( Output, dev_out, sizeof(cufftDoubleReal) * ROWS, cudaMemcpyDeviceToHost); if (x != 0) { printf(",\n [ "); } else { printf("[ "); } for (y = 0; y < ROWS; y++) { if (y != 0) { printf(", "); } printf("FloatString(\"%.18g\")", Output[y]); } printf(" ]"); } printf("\n];\n"); } int main(int argc, char** argv) { initialize(argc, argv); compute_matrix(); finalize(); return EXIT_SUCCESS; }
5fd09dd08b43865f776f672ddb9ad3019365beff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "gputimer.h" const int N= 1024; // matrix size is NxN const int K= 32; // tile size is KxK // Utility functions: compare, print, and fill matrices #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { fprintf(stderr, "CUDA error at: %s : %d\n", file,line); fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);; exit(1); } } int compare_matrices(float *gpu, float *ref) { int result = 0; for(int j=0; j < N; j++) for(int i=0; i < N; i++) if (ref[i + j*N] != gpu[i + j*N]) { // printf("reference(%d,%d) = %f but test(%d,%d) = %f\n", // i,j,ref[i+j*N],i,j,test[i+j*N]); result = 1; } return result; } void print_matrix(float *mat) { for(int j=0; j < N; j++) { for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); } printf("\n"); } } // fill a matrix with sequential numbers in the range 0..N-1 void fill_matrix(float *mat) { for(int j=0; j < N * N; j++) mat[j] = (float) j; } void transpose_CPU(float in[], float out[]) { for(int j=0; j < N; j++) for(int i=0; i < N; i++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched on a single thread __global__ void transpose_serial(float in[], float out[]) { for(int j=0; j < N; j++) for(int i=0; i < N; i++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched with one thread per row of output matrix __global__ void transpose_parallel_per_row(float in[], float out[]) { int i = threadIdx.x; for(int j=0; j < N; j++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched with one thread per element, in KxK threadblocks // thread (x,y) in grid writes element (i,j) of output matrix __global__ void transpose_parallel_per_element(float in[], float out[]) { int i = blockIdx.x * K + threadIdx.x; int j = blockIdx.y * K + threadIdx.y; 
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } __global__ void transpose_parallel_per_element_tiled(float in[], float out[]) { int left = blockIdx.x * K; int top = blockIdx.y * K; __shared__ float s[K*K]; s[threadIdx.x * K + threadIdx.y] = in[(top + threadIdx.y) * N + left + threadIdx.x]; __syncthreads(); int left2 = top; int top2 = left; out[(top2 + threadIdx.y) * N + left2 + threadIdx.x] = s[threadIdx.y * K + threadIdx.x]; } // to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks // thread blocks read & write tiles, in coalesced fashion // adjacent threads read adjacent input elements, write adjacent output elmts __global__ void transpose_parallel_per_element_tiled2(float in[], float out[]) { int left = blockIdx.x * 2 * K; int top = blockIdx.y * 2 * K; __shared__ float s[2*K*2*K]; int i = 4*(threadIdx.x * K + threadIdx.y); int row = i / (2*K); int col = i % (2*K); s[col * 2 * K + row] = in[(top + row) * N + left + col]; s[(col+1) * 2 * K + row] = in[(top + row) * N + left + col + 1]; s[(col+2) * 2 * K + row] = in[(top + row) * N + left + col + 2]; s[(col+3) * 2 * K + row] = in[(top + row) * N + left + col + 3]; __syncthreads(); int left2 = top; int top2 = left; out[(top2 + row) * N + left2 + col] = s[row * 2 * K + col]; out[(top2 + row) * N + left2 + col + 1] = s[row * 2 * K + col + 1]; out[(top2 + row) * N + left2 + col + 2] = s[row * 2 * K + col + 2]; out[(top2 + row) * N + left2 + col + 3] = s[row * 2 * K + col + 3]; } // to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks // thread blocks read & write tiles, in coalesced fashion // adjacent threads read adjacent input elements, write adjacent output elmts __global__ void transpose_parallel_per_element_tiled16(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16; int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16; int x = 
threadIdx.x, y = threadIdx.y; __shared__ float tile[16][16]; // coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } // to be launched with one thread per element, in KxK threadblocks // thread blocks read & write tiles, in coalesced fashion // shared memory array padded to avoid bank conflicts __global__ void transpose_parallel_per_element_tiled_padded(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K; int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[K][K+1]; // coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } // to be launched with one thread per element, in KxK threadblocks // thread blocks read & write tiles, in coalesced fashion // shared memory array padded to avoid bank conflicts __global__ void transpose_parallel_per_element_tiled_padded16(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16; int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[16][16+1]; // coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } int main(int argc, char **argv) { int numbytes = N * N * sizeof(float); 
float *in = (float *) malloc(numbytes); float *out = (float *) malloc(numbytes); float *gold = (float *) malloc(numbytes); fill_matrix(in); transpose_CPU(in, gold); float *d_in, *d_out; hipMalloc(&d_in, numbytes); hipMalloc(&d_out, numbytes); hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice); GpuTimer timer; /* * Now time each kernel and verify that it produces the correct result. * * To be really careful about benchmarking purposes, we should run every kernel once * to "warm" the system and avoid any compilation or code-caching effects, then run * every kernel 10 or 100 times and average the timings to smooth out any variance. * But this makes for messy code and our goal is teaching, not detailed benchmarking. */ timer.Start(); hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out); timer.Stop(); hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); printf("transpose_serial: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); timer.Start(); hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out); timer.Stop(); hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); dim3 blocks(N/K,N/K); // blocks per grid dim3 threads(K,K); // threads per block // timer.Start(); // transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out); // timer.Stop(); // hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); // printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? 
"Failed" : "Success"); timer.Start(); hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks),dim3(threads), 0, 0, d_in, d_out); timer.Stop(); hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); dim3 blocks2(N/K/2,N/K/2); // blocks per grid dim3 threads2(K,K); // threads per block // timer.Start(); // transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out); // timer.Stop(); // hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); // printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); timer.Start(); hipLaunchKernelGGL(( transpose_parallel_per_element_tiled2), dim3(blocks2),dim3(threads2), 0, 0, d_in, d_out); timer.Stop(); hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); printf("transpose_parallel_per_element_tiled2 %dx%d: %g ms.\nVerifying ...%s\n", 2*K, 2*K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); // dim3 blocks16x16(N/16,N/16); // blocks per grid // dim3 threads16x16(16,16); // threads per block // timer.Start(); // transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out); // timer.Stop(); // hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); // printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); // timer.Start(); // transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out); // timer.Stop(); // hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost); // printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); hipFree(d_in); hipFree(d_out); }
5fd09dd08b43865f776f672ddb9ad3019365beff.cu
#include <stdio.h> #include "gputimer.h" const int N= 1024; // matrix size is NxN const int K= 32; // tile size is KxK // Utility functions: compare, print, and fill matrices #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { fprintf(stderr, "CUDA error at: %s : %d\n", file,line); fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);; exit(1); } } int compare_matrices(float *gpu, float *ref) { int result = 0; for(int j=0; j < N; j++) for(int i=0; i < N; i++) if (ref[i + j*N] != gpu[i + j*N]) { // printf("reference(%d,%d) = %f but test(%d,%d) = %f\n", // i,j,ref[i+j*N],i,j,test[i+j*N]); result = 1; } return result; } void print_matrix(float *mat) { for(int j=0; j < N; j++) { for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); } printf("\n"); } } // fill a matrix with sequential numbers in the range 0..N-1 void fill_matrix(float *mat) { for(int j=0; j < N * N; j++) mat[j] = (float) j; } void transpose_CPU(float in[], float out[]) { for(int j=0; j < N; j++) for(int i=0; i < N; i++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched on a single thread __global__ void transpose_serial(float in[], float out[]) { for(int j=0; j < N; j++) for(int i=0; i < N; i++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched with one thread per row of output matrix __global__ void transpose_parallel_per_row(float in[], float out[]) { int i = threadIdx.x; for(int j=0; j < N; j++) out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } // to be launched with one thread per element, in KxK threadblocks // thread (x,y) in grid writes element (i,j) of output matrix __global__ void transpose_parallel_per_element(float in[], float out[]) { int i = blockIdx.x * K + threadIdx.x; int j = blockIdx.y * K + threadIdx.y; out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j) } __global__ void 
transpose_parallel_per_element_tiled(float in[], float out[]) { int left = blockIdx.x * K; int top = blockIdx.y * K; __shared__ float s[K*K]; s[threadIdx.x * K + threadIdx.y] = in[(top + threadIdx.y) * N + left + threadIdx.x]; __syncthreads(); int left2 = top; int top2 = left; out[(top2 + threadIdx.y) * N + left2 + threadIdx.x] = s[threadIdx.y * K + threadIdx.x]; } // to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks // thread blocks read & write tiles, in coalesced fashion // adjacent threads read adjacent input elements, write adjacent output elmts __global__ void transpose_parallel_per_element_tiled2(float in[], float out[]) { int left = blockIdx.x * 2 * K; int top = blockIdx.y * 2 * K; __shared__ float s[2*K*2*K]; int i = 4*(threadIdx.x * K + threadIdx.y); int row = i / (2*K); int col = i % (2*K); s[col * 2 * K + row] = in[(top + row) * N + left + col]; s[(col+1) * 2 * K + row] = in[(top + row) * N + left + col + 1]; s[(col+2) * 2 * K + row] = in[(top + row) * N + left + col + 2]; s[(col+3) * 2 * K + row] = in[(top + row) * N + left + col + 3]; __syncthreads(); int left2 = top; int top2 = left; out[(top2 + row) * N + left2 + col] = s[row * 2 * K + col]; out[(top2 + row) * N + left2 + col + 1] = s[row * 2 * K + col + 1]; out[(top2 + row) * N + left2 + col + 2] = s[row * 2 * K + col + 2]; out[(top2 + row) * N + left2 + col + 3] = s[row * 2 * K + col + 3]; } // to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks // thread blocks read & write tiles, in coalesced fashion // adjacent threads read adjacent input elements, write adjacent output elmts __global__ void transpose_parallel_per_element_tiled16(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16; int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[16][16]; // 
coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } // to be launched with one thread per element, in KxK threadblocks // thread blocks read & write tiles, in coalesced fashion // shared memory array padded to avoid bank conflicts __global__ void transpose_parallel_per_element_tiled_padded(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K; int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[K][K+1]; // coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } // to be launched with one thread per element, in KxK threadblocks // thread blocks read & write tiles, in coalesced fashion // shared memory array padded to avoid bank conflicts __global__ void transpose_parallel_per_element_tiled_padded16(float in[], float out[]) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16; int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[16][16+1]; // coalesced read from global mem, TRANSPOSED write into shared mem: tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); // read from shared mem, coalesced write to global mem: out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; } int main(int argc, char **argv) { int numbytes = N * N * sizeof(float); float *in = (float *) malloc(numbytes); float *out = (float *) 
malloc(numbytes); float *gold = (float *) malloc(numbytes); fill_matrix(in); transpose_CPU(in, gold); float *d_in, *d_out; cudaMalloc(&d_in, numbytes); cudaMalloc(&d_out, numbytes); cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice); GpuTimer timer; /* * Now time each kernel and verify that it produces the correct result. * * To be really careful about benchmarking purposes, we should run every kernel once * to "warm" the system and avoid any compilation or code-caching effects, then run * every kernel 10 or 100 times and average the timings to smooth out any variance. * But this makes for messy code and our goal is teaching, not detailed benchmarking. */ timer.Start(); transpose_serial<<<1,1>>>(d_in, d_out); timer.Stop(); cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); printf("transpose_serial: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); timer.Start(); transpose_parallel_per_row<<<1,N>>>(d_in, d_out); timer.Stop(); cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n", timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); dim3 blocks(N/K,N/K); // blocks per grid dim3 threads(K,K); // threads per block // timer.Start(); // transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out); // timer.Stop(); // cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); // printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); timer.Start(); transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out); timer.Stop(); cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n", K, K, timer.Elapsed(), compare_matrices(out, gold) ? 
"Failed" : "Success"); dim3 blocks2(N/K/2,N/K/2); // blocks per grid dim3 threads2(K,K); // threads per block // timer.Start(); // transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out); // timer.Stop(); // cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); // printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); timer.Start(); transpose_parallel_per_element_tiled2<<<blocks2,threads2>>>(d_in, d_out); timer.Stop(); cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); printf("transpose_parallel_per_element_tiled2 %dx%d: %g ms.\nVerifying ...%s\n", 2*K, 2*K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); // dim3 blocks16x16(N/16,N/16); // blocks per grid // dim3 threads16x16(16,16); // threads per block // timer.Start(); // transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out); // timer.Stop(); // cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); // printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); // timer.Start(); // transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out); // timer.Stop(); // cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost); // printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n", // timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success"); cudaFree(d_in); cudaFree(d_out); }
abeb1848b6a5b621dc4f304064ab96684dcafada.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sparse_mat_compact.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *input = NULL; hipMalloc(&input, XSIZE*YSIZE); int *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int *output_index_array = NULL; hipMalloc(&output_index_array, XSIZE*YSIZE); int array_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sparse_mat_compact), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,output_index_array,array_size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sparse_mat_compact), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,output_index_array,array_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sparse_mat_compact), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,output_index_array,array_size); } 
auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
abeb1848b6a5b621dc4f304064ab96684dcafada.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sparse_mat_compact.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); int *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int *output_index_array = NULL; cudaMalloc(&output_index_array, XSIZE*YSIZE); int array_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sparse_mat_compact<<<gridBlock,threadBlock>>>(input,output,output_index_array,array_size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sparse_mat_compact<<<gridBlock,threadBlock>>>(input,output,output_index_array,array_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sparse_mat_compact<<<gridBlock,threadBlock>>>(input,output,output_index_array,array_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e15b9a94bf677d366cad7df62cf49a59bd281148.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> using namespace std; double data[1024+1]; // to save data in file void csv() { char filename[11]="data.csv"; FILE *fp; fp=fopen(filename,"w+"); fprintf(fp,"Number of Threads, Average Time"); for(int i=0;i<=1024;i+=32) { fprintf(fp,"\n%d",max(i,1)); fprintf(fp,",%lf ",data[i]); } fclose(fp); printf("\n%sfile created",filename); } // Kernel function to add the elements of two arrays __global__ void add(int N, float *X, float *Y) { int t = threadIdx.x; int T = blockDim.x; for (int i = t; i < N; i += T) { Y[i] = X[i] + Y[i]; } } int main(void) { int N = 1<<27;//1.34217728 *10^8 elements. 512 MB float *X, *Y; //Allocates Memory so that both GPU and CPU can access (512*2=1GB). hipMallocManaged(&X, N*sizeof(float)); hipMallocManaged(&Y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { X[i] = 1.0f; Y[i] = 2.0f; } clock_t t; // Run add 10 times with different number of threads. and save the average time on a table. //it is good practice to keep thread number multiple of 32. for(int i=0;i<=1024;i+=32) { int T=max(i,1);// we will need atleast 1 thread. double avg=0; for(int j=0;j<=10;j++) { t=clock(); hipLaunchKernelGGL(( add), dim3(dim3(1,1,1)), dim3(dim3(T,1,1)), 0, 0, N, X, Y); hipDeviceSynchronize(); t = clock() - t; printf("T = %d, Run = %d Time = %lf\n",T,j,(((double)t)/CLOCKS_PER_SEC)*1000); if(j)avg+=((((double)t)/CLOCKS_PER_SEC)*1000);// skips the first run. } avg=avg/10; data[i]=avg; printf ("It took GPU %lf ms with %d threads.\n",avg,T); } csv();// save data in output file // Free memory hipFree(X); hipFree(Y); return 0; }
e15b9a94bf677d366cad7df62cf49a59bd281148.cu
#include <bits/stdc++.h> using namespace std; double data[1024+1]; // to save data in file void csv() { char filename[11]="data.csv"; FILE *fp; fp=fopen(filename,"w+"); fprintf(fp,"Number of Threads, Average Time"); for(int i=0;i<=1024;i+=32) { fprintf(fp,"\n%d",max(i,1)); fprintf(fp,",%lf ",data[i]); } fclose(fp); printf("\n%sfile created",filename); } // Kernel function to add the elements of two arrays __global__ void add(int N, float *X, float *Y) { int t = threadIdx.x; int T = blockDim.x; for (int i = t; i < N; i += T) { Y[i] = X[i] + Y[i]; } } int main(void) { int N = 1<<27;//1.34217728 *10^8 elements. 512 MB float *X, *Y; //Allocates Memory so that both GPU and CPU can access (512*2=1GB). cudaMallocManaged(&X, N*sizeof(float)); cudaMallocManaged(&Y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { X[i] = 1.0f; Y[i] = 2.0f; } clock_t t; // Run add 10 times with different number of threads. and save the average time on a table. //it is good practice to keep thread number multiple of 32. for(int i=0;i<=1024;i+=32) { int T=max(i,1);// we will need atleast 1 thread. double avg=0; for(int j=0;j<=10;j++) { t=clock(); add<<<dim3(1,1,1), dim3(T,1,1)>>>(N, X, Y); cudaDeviceSynchronize(); t = clock() - t; printf("T = %d, Run = %d Time = %lf\n",T,j,(((double)t)/CLOCKS_PER_SEC)*1000); if(j)avg+=((((double)t)/CLOCKS_PER_SEC)*1000);// skips the first run. } avg=avg/10; data[i]=avg; printf ("It took GPU %lf ms with %d threads.\n",avg,T); } csv();// save data in output file // Free memory cudaFree(X); cudaFree(Y); return 0; }
44c6090a6056a6ac6d0db905fa27c4890f3fa736.hip
// !!! This is a file automatically generated by hipify!!! #include "GDFColumn.cuh" #include "GDFCounter.cuh" #include "utilities/legacy/bit_util.cuh" #include "gtest/gtest.h" namespace { struct GdfColumnCppTest : public testing::Test { GdfColumnCppTest() { counter_instance = GDFRefCounter::getInstance(); } ~GdfColumnCppTest() {} void SetUp() { } void TearDown() override {} GDFRefCounter *counter_instance; }; TEST_F(GdfColumnCppTest, AssignOperatorInGdfCounter) { gdf_column *gdf_col_1{}; gdf_column *gdf_col_2{}; gdf_column *gdf_col_3{}; ASSERT_EQ(counter_instance->get_map_size(), 0); { // initialization gdf_column_cpp cpp_col_1; gdf_dtype_extra_info extra_info{TIME_UNIT_NONE}; cpp_col_1.create_gdf_column(GDF_INT32, extra_info, 16, nullptr, 4, "sample"); gdf_col_1 = cpp_col_1.get_gdf_column(); gdf_column_cpp cpp_col_2; cpp_col_2.create_gdf_column(GDF_INT64, extra_info, 32, nullptr, 8, "sample"); gdf_col_2 = cpp_col_2.get_gdf_column(); gdf_column_cpp cpp_col_3; cpp_col_3.create_gdf_column(GDF_INT64, extra_info, 32, nullptr, 8, "sample"); gdf_col_3 = cpp_col_3.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 3); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_2), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator cpp_col_2 = cpp_col_1; ASSERT_EQ(counter_instance->get_map_size(), 2); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 2); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator on equal gdf_column_cpp cpp_col_1 = cpp_col_2; ASSERT_EQ(counter_instance->get_map_size(), 2); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 2); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator again cpp_col_1 = cpp_col_3; 
ASSERT_EQ(counter_instance->get_map_size(), 2); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 2); } ASSERT_EQ(counter_instance->get_map_size(), 0); ASSERT_TRUE(counter_instance->contains_column(gdf_col_1) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_2) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_3) == false); } // void gdf_column_cpp::create_gdf_column(gdf_dtype type, // size_t num_values, // void * input_data, // cudf::valid_type * host_valids, // size_t width_per_value, // const std::string &column_name) TEST_F(GdfColumnCppTest, CreateGdfColumnCppTypeOne_DoubleCreation) { gdf_column *gdf_col_1{}; gdf_column *gdf_col_2{}; ASSERT_EQ(counter_instance->get_map_size(), 0); { // initialize gdf_column_cpp cpp_col; gdf_dtype_extra_info extra_info{TIME_UNIT_NONE}; cpp_col.create_gdf_column(GDF_INT32, extra_info, 32, nullptr, nullptr, 4, "sample 1"); gdf_col_1 = cpp_col.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); // create again - note: os could reuse the pointer cpp_col.create_gdf_column(GDF_INT64, extra_info, 64, nullptr, nullptr, 8, "sample 2"); gdf_col_2 = cpp_col.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 1); if (gdf_col_1 == gdf_col_2) { ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); } else { ASSERT_EQ(counter_instance->contains_column(gdf_col_1), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_2), 1); } } ASSERT_EQ(counter_instance->get_map_size(), 0); ASSERT_TRUE(counter_instance->contains_column(gdf_col_1) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_2) == false); } TEST_F(GdfColumnCppTest, CreateGdfColumnCppNVCategoryValids) { size_t totalStrings = 7; const char *cstrings[] = 
{"aaaaaaab", nullptr, "aaaaaaak", nullptr, nullptr, "aaaaaaax", nullptr}; NVCategory *category = NVCategory::create_from_array(cstrings, totalStrings); gdf_column_cpp cpp_col; cpp_col.create_gdf_column(category, totalStrings, "test"); std::vector<cudf::valid_type> valids(gdf_valid_allocation_size(cpp_col.size())); CheckCudaErrors(hipMemcpy(valids.data(), cpp_col.valid(), valids.size(), hipMemcpyDeviceToHost)); for (size_t i = 0; i < totalStrings; i++) { ASSERT_EQ(gdf_is_valid(valids.data(), i), cstrings[i] != nullptr); } ASSERT_EQ(cpp_col.null_count(), 4); } } // namespace
44c6090a6056a6ac6d0db905fa27c4890f3fa736.cu
#include "GDFColumn.cuh" #include "GDFCounter.cuh" #include "utilities/legacy/bit_util.cuh" #include "gtest/gtest.h" namespace { struct GdfColumnCppTest : public testing::Test { GdfColumnCppTest() { counter_instance = GDFRefCounter::getInstance(); } ~GdfColumnCppTest() {} void SetUp() { } void TearDown() override {} GDFRefCounter *counter_instance; }; TEST_F(GdfColumnCppTest, AssignOperatorInGdfCounter) { gdf_column *gdf_col_1{}; gdf_column *gdf_col_2{}; gdf_column *gdf_col_3{}; ASSERT_EQ(counter_instance->get_map_size(), 0); { // initialization gdf_column_cpp cpp_col_1; gdf_dtype_extra_info extra_info{TIME_UNIT_NONE}; cpp_col_1.create_gdf_column(GDF_INT32, extra_info, 16, nullptr, 4, "sample"); gdf_col_1 = cpp_col_1.get_gdf_column(); gdf_column_cpp cpp_col_2; cpp_col_2.create_gdf_column(GDF_INT64, extra_info, 32, nullptr, 8, "sample"); gdf_col_2 = cpp_col_2.get_gdf_column(); gdf_column_cpp cpp_col_3; cpp_col_3.create_gdf_column(GDF_INT64, extra_info, 32, nullptr, 8, "sample"); gdf_col_3 = cpp_col_3.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 3); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_2), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator cpp_col_2 = cpp_col_1; ASSERT_EQ(counter_instance->get_map_size(), 2); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 2); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator on equal gdf_column_cpp cpp_col_1 = cpp_col_2; ASSERT_EQ(counter_instance->get_map_size(), 2); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 2); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 1); // test assign operator again cpp_col_1 = cpp_col_3; ASSERT_EQ(counter_instance->get_map_size(), 2); 
ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_3), 2); } ASSERT_EQ(counter_instance->get_map_size(), 0); ASSERT_TRUE(counter_instance->contains_column(gdf_col_1) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_2) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_3) == false); } // void gdf_column_cpp::create_gdf_column(gdf_dtype type, // size_t num_values, // void * input_data, // cudf::valid_type * host_valids, // size_t width_per_value, // const std::string &column_name) TEST_F(GdfColumnCppTest, CreateGdfColumnCppTypeOne_DoubleCreation) { gdf_column *gdf_col_1{}; gdf_column *gdf_col_2{}; ASSERT_EQ(counter_instance->get_map_size(), 0); { // initialize gdf_column_cpp cpp_col; gdf_dtype_extra_info extra_info{TIME_UNIT_NONE}; cpp_col.create_gdf_column(GDF_INT32, extra_info, 32, nullptr, nullptr, 4, "sample 1"); gdf_col_1 = cpp_col.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 1); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); ASSERT_EQ(counter_instance->contains_column(gdf_col_2), false); // create again - note: os could reuse the pointer cpp_col.create_gdf_column(GDF_INT64, extra_info, 64, nullptr, nullptr, 8, "sample 2"); gdf_col_2 = cpp_col.get_gdf_column(); ASSERT_EQ(counter_instance->get_map_size(), 1); if (gdf_col_1 == gdf_col_2) { ASSERT_EQ(counter_instance->column_ref_value(gdf_col_1), 1); } else { ASSERT_EQ(counter_instance->contains_column(gdf_col_1), false); ASSERT_EQ(counter_instance->column_ref_value(gdf_col_2), 1); } } ASSERT_EQ(counter_instance->get_map_size(), 0); ASSERT_TRUE(counter_instance->contains_column(gdf_col_1) == false); ASSERT_TRUE(counter_instance->contains_column(gdf_col_2) == false); } TEST_F(GdfColumnCppTest, CreateGdfColumnCppNVCategoryValids) { size_t totalStrings = 7; const char *cstrings[] = {"aaaaaaab", nullptr, "aaaaaaak", nullptr, nullptr, 
"aaaaaaax", nullptr}; NVCategory *category = NVCategory::create_from_array(cstrings, totalStrings); gdf_column_cpp cpp_col; cpp_col.create_gdf_column(category, totalStrings, "test"); std::vector<cudf::valid_type> valids(gdf_valid_allocation_size(cpp_col.size())); CheckCudaErrors(cudaMemcpy(valids.data(), cpp_col.valid(), valids.size(), cudaMemcpyDeviceToHost)); for (size_t i = 0; i < totalStrings; i++) { ASSERT_EQ(gdf_is_valid(valids.data(), i), cstrings[i] != nullptr); } ASSERT_EQ(cpp_col.null_count(), 4); } } // namespace
15a78888273d8ac85507a2c9c8510ab2811b436b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************** * FILENAME : an_kernel.cu * * DESCRIPTION : * Kernel side implementation of AlexNet network * * NOTES : * This file includes implementation of 2D/3D convolution * normalisation,pooling and fully connected layer kernels. * * AUTHOR : Aajna Karki * https://www.linkedin.com/in/aajna/ *********************************************************************/ #ifndef _AN_KERNEL_H_ #define _AN_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //#define LAYER2_DEBUG //#define POOL_DEBUG __constant__ int kernelTemplate[25] = { 0, 1, 2, 3, 4, 29, 30, 31, 32, 33, 58, 59, 60, 61, 62, 87, 88, 89, 90, 91, 116,117,118,119,120 }; __constant__ int kernelTemplate2[25] = { 0, 1, 2, 3, 4, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30, 39, 40, 41, 42, 43, 52, 53, 54, 55, 56 }; #ifndef CPU __global__ void executeFirstLayer(float *bias,float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,int r_offset, int c_offset) { float product = 0.0; int col_width = 227; int stride_width = 4; int stride = 0,colstride = 0; int output = blockIdx.x; int row = threadIdx.x + r_offset; int col = threadIdx.y + c_offset; colstride = 3*row*stride_width*col_width; stride = 0; product = 0; stride = col * 4 * 3; /* RGB weights and input 11*11*3 */ #pragma unroll for(int i = 0; i < 11; i++) //loop unrolling { for(int j = 0; j < 11; j++) { product += ((Layer1_Neurons_GPU[i*227*3 + j*3 + stride + colstride] * Layer1_Weights_GPU[i*11 + j + (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*227*3 + j*3 + 1 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11 + j+ (output * 11*11*3)]) + 
(Layer1_Neurons_GPU[i*227*3 + j*3 + 2 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11*2 + j+ (output * 11*11*3)])); } } product += bias[output]; if(product < 0) /* RELU Layer */ product = 0; // max(0,x) Layer2_Neurons_GPU[output*55*55 + row*55 + col] = product; product = 0.0; } /* IN : Layer2_Neurons_GPU // Neurons input Layer2_pool_GPU // output after pooling out // number of outputs out_fr // feature map size of output in terms of row out_fc // feature map size of output in terms of column kernel // kernel size stride_width // stride in_fr // feature map size of input in terms of row in_fc // feature map size of input in terms of column */ __global__ void executepoolingCuda(float *Layer2_Neurons_GPU,float *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc) { float max = 0.0; int stride = 0,colstride = 0; int output = blockIdx.x; int row = threadIdx.x; int col = threadIdx.y; colstride = row * stride_width*in_fc; stride = col * stride_width; #pragma unroll for(int i = 0; i < kernel; i++) { for(int j = 0; j < kernel; j++) { if(max < ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride]))) max = ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride])) ; } } Layer2_pool_GPU[output*out_fr*out_fc + row*out_fc + col] = max; max = 0.0; stride+= stride_width; } __global__ void execute3DconvolutionCuda(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; int stride = 0,colstride = 0; int output = blockIdx.x; // 128 colstride = 0; int row = threadIdx.x; stride = 0; if(row > pad) colstride = (row - pad) * fr; int col = threadIdx.y; if(col >= pad) stride = col * stride_width; x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand 
side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; #pragma unroll for(int feature =0; feature < in_output ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += ( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + feature*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad]); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } __global__ void execute3Dconvolutiongroup2Cuda(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; int stride = 0,colstride = 0; /* Execute second set of inputs */ int output = blockIdx.x + out; colstride = 0; int row = threadIdx.x; stride = 0; if(row > pad) colstride = (row - pad) * fr; int col = threadIdx.y; if(col >= pad) stride = col*stride_width; x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - 
row; #pragma unroll for(int feature = in_output ; feature < (in_output << 1) ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += (( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + (feature-in_output)*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad])); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; } __global__ void executelrnNormCuda_split(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU,int r_offset, int c_offset) { int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; int output = blockIdx.x; int row = threadIdx.x + r_offset; int col = threadIdx.y + c_offset; nStart=(output-2) > 1 ? (output-2) : 1 ; nEnd=(output+2) < out ? (output+2) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } __global__ void executelrnNormCuda(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU,int func_call) { int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; int output = blockIdx.x; int row = threadIdx.x + func_call * 32; int col = threadIdx.y + func_call * 32; nStart=(output-2) > 1 ? (output-2) : 1 ; nEnd=(output+2) < out ? 
(output+2) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } ////copying same function for Layer 6 __constant__ float bias_L6_CM[4096]; __global__ void executeFCLayer_L6(float *Layer_InNeurons_GPU, float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { ////__shared__ float Layer_InNeurons_GPU_SM[256*6*6];////no point in making this as shared memory or constant as it is huge >10KB float product = 0.0; int out = blockIdx.x; int weight = out * input; { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU[in] * Layer_Weights_GPU[weight+in]; } product += bias_L6_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } //Layer 7 __constant__ float bias_L7_CM[4096]; __constant__ float Layer_InNeurons_GPU_L7_CM[4096]; __global__ void executeFCLayer_L7(float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { float product = 0.0; int out = blockIdx.x; int weight = out * input; { //#pragma unroll ////caused increase in execution time for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU_L7_CM[in] * Layer_Weights_GPU[weight+in]; } product += bias_L7_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } ////copying same function for Layer 8 __constant__ float bias_L8_CM[1000]; //__constant__ float Layer_InNeurons_GPU_L8_CM[4096]; __global__ void executeFCLayer_L8(float* Layer_InNeurons_GPU_L8,float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int 
output, int input,bool reLU,bool dropout) { float product = 0.0; int out = blockIdx.x; int weight = out * input; { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU_L8[in] * Layer_Weights_GPU[weight+in]; } product += bias_L8_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } __global__ void executeThirdLayer(float *Layer3_Neurons_GPU, float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*1251; float result=0; result+=Layer3_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<1250; ++i ) { result+=Layer3_Neurons_GPU[i+(1250*blockIdx.y)]*Layer3_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer4_Neurons_GPU[blockID+(100*blockIdx.y)]=result; } __global__ void executeFourthLayer(float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*101; float result=0; result+=Layer4_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<100; ++i ) { result+=Layer4_Neurons_GPU[i+(100*blockIdx.y)]*Layer4_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer5_Neurons_GPU[blockID+(10*blockIdx.y)]=result; } #else void executeFirstLayer(float *bias,float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,int stride_width,int col_width,int feature_r,int feature_c,int out) { float product = 0.0; int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) { for(int row =0; row < feature_r ;row++) { colstride = 3*row*stride_width*col_width; stride = 0; for(int col =0; col < feature_c ;col++) { product = 0; /* RGB weights and input 11*11*3 , kernel is 11*11 */ for(int i = 0; i < 11; i++) { for(int j = 0; j < 11; j++) { product += ((Layer1_Neurons_GPU[i*col_width*3 + j*3 + stride + 
colstride] * Layer1_Weights_GPU[i*11 + j + (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*col_width*3 + j*3 + 1 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11 + j+ (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*col_width*3 + j*3 + 2 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11*2 + j+ (output * 11*11*3)])); } } product += bias[output]; if(product < 0) /* RELU Layer */ product = 0; // max(0,x) Layer2_Neurons_GPU[output*feature_r*feature_c + row*feature_c + col] = product; #ifdef LAYER1_DEBUG printf("%f\n",product); #endif product = 0.0; stride+= stride_width*3; } } } } } void pooling(float *Layer2_Neurons_GPU,float *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc) { printf("pooling Activation layer \n"); float max = 0.0; int downsample = 0; int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) { for(int row =0; row < out_fr ;row++) { colstride = row * stride_width*in_fc; stride = 0; for(int col =0; col < out_fc ;col++) { for(int i = 0; i < kernel; i++) { for(int j = 0; j < kernel; j++) { if(max < ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride]))) max = ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride])) ; // if(output == 141) // printf("%f %d\t",Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride],((output*in_fr*in_fc) + i*in_fc + j + stride + colstride)) ; } } Layer2_pool_GPU[downsample] = max; #ifdef POOL_DEBUG printf("\n %f %d\n",max,downsample); #endif max = 0.0; downsample++; stride+= stride_width; } } } } } void execute3Dconvolution(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; printf(" 3D convolution with group %d,output %d,feature %d x %d ,stride %d, kernel %d, pad %d, input 
%d\n",group,out,fr,fc,stride_width,kernel,pad,in_output); if(group == 2) { out = out >> 1; in_output = in_output >> 1; } int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) /* out = 256 */ { colstride = 0; for(int row =0; row < fr ; row++) /* out = 256 */ { stride = 0; if(row > pad) colstride = (row - pad) * fr; for(int col =0; col < fc ;col++) /* out = 256 */ { x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; for(int feature =0; feature < in_output ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += ( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + feature*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad]); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; #ifdef LAYER2_DEBUG printf("%f\n",product); #endif // if((group == 2) && (out == 128) && (in_output == 192)) // printf("%f\n",product); Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } } } if(group == 2) { /* Execute second set of inputs */ for(int output = out ;output < (out << 1) ;output++) /* out = 256 */ { colstride = 0; for(int row =0; row < fr; row++) /* out = 256 */ { stride = 0; if(row > pad) colstride = (row - pad) * fr; for(int col =0; col < fc ;col++) /* out = 256 */ { x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in 
left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; for(int feature = in_output ; feature < (in_output << 1) ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += (( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + (feature-in_output)*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad])); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; #ifdef LAYER2_DEBUG printf("%f\n",product); #endif // if((group == 2) && (out == 128) && (in_output == 192)) // printf("%f\n",product); Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } } } } } } void executelrnNorm(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU) { printf(" Exexcute Norm Layer\n"); int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; for(int row =0; row < fr; row++) { for(int col =0; col < fc ;col++) { for(int output = 0 ;output < out ;output++) { nStart=(output-floor(local_size/2)) > 1 ? (output-floor(local_size/2)) : 1 ; nEnd=(output+floor(local_size/2)) < out ? 
(output+floor(local_size/2)) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } } } #ifdef NORM_LAYER for(int N = 0; N < out; N++) { for(int W = 0; W < fr; W++) { for(int H = 0; H < fc; H++) { printf("%f\n",Layer_OutNeurons_GPU[N*fr*fc + W*fc + H]);; } } } #endif } void executeFCLayer(float *bias,float *Layer_InNeurons_GPU,float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { printf("Execute FC Layer of output : %d input %d\n",output,input); float product = 0.0,max = 0.0; int weight = 0,index = 0; for(int out=0; out < output ; out++) { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU[in] * Layer_Weights_GPU[weight++]; } product += bias[out]; if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } else { if(max < product) { index = out; max = product; } } if(dropout == true) { } Layer_OutNeurons_GPU[out] = product; #ifdef FC_DEBUG printf("%f\n",product); #endif product = 0.0; } printf(" MAX from FC layer = %d\n",index); } void executeSoftMax(float *Layer_In_Neurons_GPU) { printf("executeSoftMax \n"); float max = 0,sum = 0; float output[1000] = {0}; for(int i = 0; i < 1000; i++) { if(Layer_In_Neurons_GPU[i] > max) max = Layer_In_Neurons_GPU[i]; } #ifdef SOFTMAX_DEBUG printf("Max = %10e\n",max); #endif for(int i = 0; i < 1000; i++) { output[i] = exp(Layer_In_Neurons_GPU[i] - max); sum += output[i]; } #ifdef SOFTMAX_DEBUG printf("Sum = %10e\n",sum); #endif for(int i = 0; i < 1000; i++) { output[i] *= (1/sum); #ifdef SOFTMAX_DEBUG printf("%10e\n",output[i]); #endif } } #endif #endif // #ifndef _AN_KERNEL_H_
15a78888273d8ac85507a2c9c8510ab2811b436b.cu
/********************************************************************** * FILENAME : an_kernel.cu * * DESCRIPTION : * Kernel side implementation of AlexNet network * * NOTES : * This file includes implementation of 2D/3D convolution * normalisation,pooling and fully connected layer kernels. * * AUTHOR : Aajna Karki * https://www.linkedin.com/in/aajna/ *********************************************************************/ #ifndef _AN_KERNEL_H_ #define _AN_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //#define LAYER2_DEBUG //#define POOL_DEBUG __constant__ int kernelTemplate[25] = { 0, 1, 2, 3, 4, 29, 30, 31, 32, 33, 58, 59, 60, 61, 62, 87, 88, 89, 90, 91, 116,117,118,119,120 }; __constant__ int kernelTemplate2[25] = { 0, 1, 2, 3, 4, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30, 39, 40, 41, 42, 43, 52, 53, 54, 55, 56 }; #ifndef CPU __global__ void executeFirstLayer(float *bias,float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,int r_offset, int c_offset) { float product = 0.0; int col_width = 227; int stride_width = 4; int stride = 0,colstride = 0; int output = blockIdx.x; int row = threadIdx.x + r_offset; int col = threadIdx.y + c_offset; colstride = 3*row*stride_width*col_width; stride = 0; product = 0; stride = col * 4 * 3; /* RGB weights and input 11*11*3 */ #pragma unroll for(int i = 0; i < 11; i++) //loop unrolling { for(int j = 0; j < 11; j++) { product += ((Layer1_Neurons_GPU[i*227*3 + j*3 + stride + colstride] * Layer1_Weights_GPU[i*11 + j + (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*227*3 + j*3 + 1 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11 + j+ (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*227*3 + j*3 + 2 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11*2 + j+ 
(output * 11*11*3)])); } } product += bias[output]; if(product < 0) /* RELU Layer */ product = 0; // max(0,x) Layer2_Neurons_GPU[output*55*55 + row*55 + col] = product; product = 0.0; } /* IN : Layer2_Neurons_GPU // Neurons input Layer2_pool_GPU // output after pooling out // number of outputs out_fr // feature map size of output in terms of row out_fc // feature map size of output in terms of column kernel // kernel size stride_width // stride in_fr // feature map size of input in terms of row in_fc // feature map size of input in terms of column */ __global__ void executepoolingCuda(float *Layer2_Neurons_GPU,float *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc) { float max = 0.0; int stride = 0,colstride = 0; int output = blockIdx.x; int row = threadIdx.x; int col = threadIdx.y; colstride = row * stride_width*in_fc; stride = col * stride_width; #pragma unroll for(int i = 0; i < kernel; i++) { for(int j = 0; j < kernel; j++) { if(max < ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride]))) max = ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride])) ; } } Layer2_pool_GPU[output*out_fr*out_fc + row*out_fc + col] = max; max = 0.0; stride+= stride_width; } __global__ void execute3DconvolutionCuda(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; int stride = 0,colstride = 0; int output = blockIdx.x; // 128 colstride = 0; int row = threadIdx.x; stride = 0; if(row > pad) colstride = (row - pad) * fr; int col = threadIdx.y; if(col >= pad) stride = col * stride_width; x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding 
in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; #pragma unroll for(int feature =0; feature < in_output ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += ( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + feature*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad]); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } __global__ void execute3Dconvolutiongroup2Cuda(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; int stride = 0,colstride = 0; /* Execute second set of inputs */ int output = blockIdx.x + out; colstride = 0; int row = threadIdx.x; stride = 0; if(row > pad) colstride = (row - pad) * fr; int col = threadIdx.y; if(col >= pad) stride = col*stride_width; x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; #pragma unroll for(int feature = in_output ; feature < (in_output << 1) ; feature++) // calculate 
the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += (( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + (feature-in_output)*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad])); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; } __global__ void executelrnNormCuda_split(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU,int r_offset, int c_offset) { int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; int output = blockIdx.x; int row = threadIdx.x + r_offset; int col = threadIdx.y + c_offset; nStart=(output-2) > 1 ? (output-2) : 1 ; nEnd=(output+2) < out ? (output+2) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } __global__ void executelrnNormCuda(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU,int func_call) { int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; int output = blockIdx.x; int row = threadIdx.x + func_call * 32; int col = threadIdx.y + func_call * 32; nStart=(output-2) > 1 ? (output-2) : 1 ; nEnd=(output+2) < out ? 
(output+2) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } ////copying same function for Layer 6 __constant__ float bias_L6_CM[4096]; __global__ void executeFCLayer_L6(float *Layer_InNeurons_GPU, float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { ////__shared__ float Layer_InNeurons_GPU_SM[256*6*6];////no point in making this as shared memory or constant as it is huge >10KB float product = 0.0; int out = blockIdx.x; int weight = out * input; { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU[in] * Layer_Weights_GPU[weight+in]; } product += bias_L6_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } //Layer 7 __constant__ float bias_L7_CM[4096]; __constant__ float Layer_InNeurons_GPU_L7_CM[4096]; __global__ void executeFCLayer_L7(float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { float product = 0.0; int out = blockIdx.x; int weight = out * input; { //#pragma unroll ////caused increase in execution time for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU_L7_CM[in] * Layer_Weights_GPU[weight+in]; } product += bias_L7_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } ////copying same function for Layer 8 __constant__ float bias_L8_CM[1000]; //__constant__ float Layer_InNeurons_GPU_L8_CM[4096]; __global__ void executeFCLayer_L8(float* Layer_InNeurons_GPU_L8,float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int 
output, int input,bool reLU,bool dropout) { float product = 0.0; int out = blockIdx.x; int weight = out * input; { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU_L8[in] * Layer_Weights_GPU[weight+in]; } product += bias_L8_CM[out];////same for entire block so put in constant memory if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } Layer_OutNeurons_GPU[out] = product; product = 0.0; } } __global__ void executeThirdLayer(float *Layer3_Neurons_GPU, float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*1251; float result=0; result+=Layer3_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<1250; ++i ) { result+=Layer3_Neurons_GPU[i+(1250*blockIdx.y)]*Layer3_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer4_Neurons_GPU[blockID+(100*blockIdx.y)]=result; } __global__ void executeFourthLayer(float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU) { int blockID=blockIdx.x; //int pixelY=threadIdx.y; int weightBegin=blockID*101; float result=0; result+=Layer4_Weights_GPU[weightBegin]; ++weightBegin; for (int i=0; i<100; ++i ) { result+=Layer4_Neurons_GPU[i+(100*blockIdx.y)]*Layer4_Weights_GPU[weightBegin+i]; } result=(1.7159*tanhf(0.66666667*result)); Layer5_Neurons_GPU[blockID+(10*blockIdx.y)]=result; } #else void executeFirstLayer(float *bias,float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,int stride_width,int col_width,int feature_r,int feature_c,int out) { float product = 0.0; int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) { for(int row =0; row < feature_r ;row++) { colstride = 3*row*stride_width*col_width; stride = 0; for(int col =0; col < feature_c ;col++) { product = 0; /* RGB weights and input 11*11*3 , kernel is 11*11 */ for(int i = 0; i < 11; i++) { for(int j = 0; j < 11; j++) { product += ((Layer1_Neurons_GPU[i*col_width*3 + j*3 + stride + 
colstride] * Layer1_Weights_GPU[i*11 + j + (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*col_width*3 + j*3 + 1 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11 + j+ (output * 11*11*3)]) + (Layer1_Neurons_GPU[i*col_width*3 + j*3 + 2 + stride + colstride] * Layer1_Weights_GPU[i*11 + 11*11*2 + j+ (output * 11*11*3)])); } } product += bias[output]; if(product < 0) /* RELU Layer */ product = 0; // max(0,x) Layer2_Neurons_GPU[output*feature_r*feature_c + row*feature_c + col] = product; #ifdef LAYER1_DEBUG printf("%f\n",product); #endif product = 0.0; stride+= stride_width*3; } } } } } void pooling(float *Layer2_Neurons_GPU,float *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc) { printf("pooling Activation layer \n"); float max = 0.0; int downsample = 0; int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) { for(int row =0; row < out_fr ;row++) { colstride = row * stride_width*in_fc; stride = 0; for(int col =0; col < out_fc ;col++) { for(int i = 0; i < kernel; i++) { for(int j = 0; j < kernel; j++) { if(max < ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride]))) max = ((Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride])) ; // if(output == 141) // printf("%f %d\t",Layer2_Neurons_GPU[(output*in_fr*in_fc) + i*in_fc + j + stride + colstride],((output*in_fr*in_fc) + i*in_fc + j + stride + colstride)) ; } } Layer2_pool_GPU[downsample] = max; #ifdef POOL_DEBUG printf("\n %f %d\n",max,downsample); #endif max = 0.0; downsample++; stride+= stride_width; } } } } } void execute3Dconvolution(float *bias,float *Layer2_Neurons_GPU, float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,int out,int fr,int fc,int stride_width,int kernel,int pad,int in_output,int group) { float product = 0.0; int x_pad = 0, y_pad = 0, loopc = 0,loopr = 0; printf(" 3D convolution with group %d,output %d,feature %d x %d ,stride %d, kernel %d, pad %d, input 
%d\n",group,out,fr,fc,stride_width,kernel,pad,in_output); if(group == 2) { out = out >> 1; in_output = in_output >> 1; } int stride = 0,colstride = 0; { for(int output =0;output < out ;output++) /* out = 256 */ { colstride = 0; for(int row =0; row < fr ; row++) /* out = 256 */ { stride = 0; if(row > pad) colstride = (row - pad) * fr; for(int col =0; col < fc ;col++) /* out = 256 */ { x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; for(int feature =0; feature < in_output ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += ( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + feature*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad]); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; #ifdef LAYER2_DEBUG printf("%f\n",product); #endif // if((group == 2) && (out == 128) && (in_output == 192)) // printf("%f\n",product); Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } } } if(group == 2) { /* Execute second set of inputs */ for(int output = out ;output < (out << 1) ;output++) /* out = 256 */ { colstride = 0; for(int row =0; row < fr; row++) /* out = 256 */ { stride = 0; if(row > pad) colstride = (row - pad) * fr; for(int col =0; col < fc ;col++) /* out = 256 */ { x_pad = 0; y_pad = 0; /* set the loops value */ loopc = kernel;loopr = kernel; /* take care of padding in 
left hand side of image*/ if( row < pad) { x_pad = pad - row; loopr = kernel - x_pad; } /* take care of padding in upper side of image*/ if( col < pad ) { y_pad = pad - col; loopc = kernel - y_pad; } /* take care of padding in right side of image*/ if(col >= fc - pad) loopc = fc + pad - col; /* take care of padding in bottom of image */ if(row >= fr - pad) loopr = fr + pad - row; for(int feature = in_output ; feature < (in_output << 1) ; feature++) // calculate the feature maps { for(int i =0; i < loopr ; i++) // kernel convolution { for(int j =0; j < loopc ; j++) // kernel convolution { product += (( Layer2_Neurons_GPU[feature*fr*fc + i*fc + j + stride + colstride] * Layer2_Weights_GPU[output*kernel*kernel*in_output + (feature-in_output)*kernel*kernel + i*kernel + j + kernel*x_pad + y_pad])); } } } product += bias[output]; if(product < 0) /* ReLU Layer */ product = 0; #ifdef LAYER2_DEBUG printf("%f\n",product); #endif // if((group == 2) && (out == 128) && (in_output == 192)) // printf("%f\n",product); Layer3_Neurons_GPU[output*fr*fc + row*fc + col] = product; product = 0.0; if(col >= pad) stride+=stride_width; } } } } } } void executelrnNorm(float *Layer_InNeurons_GPU, float alpha, float beta,int local_size,int out,int fr,int fc,float *Layer_OutNeurons_GPU) { printf(" Exexcute Norm Layer\n"); int nStart = 0, nEnd = 0; float value = 0.0;float sum = 0.0; for(int row =0; row < fr; row++) { for(int col =0; col < fc ;col++) { for(int output = 0 ;output < out ;output++) { nStart=(output-floor(local_size/2)) > 1 ? (output-floor(local_size/2)) : 1 ; nEnd=(output+floor(local_size/2)) < out ? 
(output+floor(local_size/2)) : out ; for(int i = (nStart-1); i < (nEnd-1) ; i++) // kernel convolution { sum += pow(( Layer_InNeurons_GPU[i*fr*fc + row*fc + col]),2); } value = (Layer_InNeurons_GPU[output*fr*fc + row*fc + col]) / (pow( 1 + ((alpha/local_size) *sum),beta)); sum = 0; Layer_OutNeurons_GPU[output*fr*fc + row*fc + col] = value; } } } #ifdef NORM_LAYER for(int N = 0; N < out; N++) { for(int W = 0; W < fr; W++) { for(int H = 0; H < fc; H++) { printf("%f\n",Layer_OutNeurons_GPU[N*fr*fc + W*fc + H]);; } } } #endif } void executeFCLayer(float *bias,float *Layer_InNeurons_GPU,float *Layer_Weights_GPU,float *Layer_OutNeurons_GPU,int output, int input,bool reLU,bool dropout) { printf("Execute FC Layer of output : %d input %d\n",output,input); float product = 0.0,max = 0.0; int weight = 0,index = 0; for(int out=0; out < output ; out++) { for(int in = 0; in < input; in++) { product += Layer_InNeurons_GPU[in] * Layer_Weights_GPU[weight++]; } product += bias[out]; if(reLU == true) { if(product < 0) /* ReLU Layer */ product = 0; } else { if(max < product) { index = out; max = product; } } if(dropout == true) { } Layer_OutNeurons_GPU[out] = product; #ifdef FC_DEBUG printf("%f\n",product); #endif product = 0.0; } printf(" MAX from FC layer = %d\n",index); } void executeSoftMax(float *Layer_In_Neurons_GPU) { printf("executeSoftMax \n"); float max = 0,sum = 0; float output[1000] = {0}; for(int i = 0; i < 1000; i++) { if(Layer_In_Neurons_GPU[i] > max) max = Layer_In_Neurons_GPU[i]; } #ifdef SOFTMAX_DEBUG printf("Max = %10e\n",max); #endif for(int i = 0; i < 1000; i++) { output[i] = exp(Layer_In_Neurons_GPU[i] - max); sum += output[i]; } #ifdef SOFTMAX_DEBUG printf("Sum = %10e\n",sum); #endif for(int i = 0; i < 1000; i++) { output[i] *= (1/sum); #ifdef SOFTMAX_DEBUG printf("%10e\n",output[i]); #endif } } #endif #endif // #ifndef _AN_KERNEL_H_
1b86607dc6fa33839ecf889bc97935499cdec067.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= numCols || y >= numRows) return; uchar4 rgba = rgbaImage[x * numCols + y]; unsigned char greyValue = .299f * rgba.x + 0.587f * rgba.y + .114f * rgba.z; greyImage[x * numCols + y] = greyValue; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize( 32 / numRows + 1, 32 / numCols + 1, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
1b86607dc6fa33839ecf889bc97935499cdec067.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= numCols || y >= numRows) return; uchar4 rgba = rgbaImage[x * numCols + y]; unsigned char greyValue = .299f * rgba.x + 0.587f * rgba.y + .114f * rgba.z; greyImage[x * numCols + y] = greyValue; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize( 32 / numRows + 1, 32 / numCols + 1, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }