hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
df5f307f9d31f412a688c53396ff283763bd78ee.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <hip/hip_runtime.h> #include "CudaUtils.h" #include "CudaMem.h" #include "cuttGpuModelKernel.h" #define RESTRICT //__restrict__ // // Global memory access statistics // struct MemStat { int gld_tran; int gst_tran; int gld_req; int gst_req; int cl_full_l2; int cl_part_l2; int cl_full_l1; int cl_part_l1; // int l1_tran; __device__ __forceinline__ void clear() { gld_tran = 0; gst_tran = 0; gld_req = 0; gst_req = 0; cl_full_l2 = 0; cl_part_l2 = 0; cl_full_l1 = 0; cl_part_l1 = 0; // l1_tran = 0; } }; // // Returns scalar tensor position. Each lane has the same p // NOTE: c and d on inactive warps must be 1 !! 
// __device__ __forceinline__ int tensorPos( const int p, const int rank, const int c, const int d, const int ct, const int numLane=warpSize ) { int r = ((p/c) % d)*ct; #pragma unroll for (int i=numLane/2;i >= 1;i/=2) { r += __shfl_xor_sync(0xffffffff,r,i); } return r; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int pos, const int n, const int accWidth, const int warpLane) { int seg0 = pos/accWidth; int srcLane = (warpLane == 0 || warpLane >= n) ? (warpLane) : (warpLane - 1); int seg1 = __shfl_sync(0xffffffff,seg0,srcLane); int count = __popc(__ballot_sync(0xffffffff,seg0 != seg1)) + 1; count = (n == 0) ? 0 : count; return count; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int* segbuf, const int n) { int count = 0; for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int seg_prev = (i - 1 >= 0) ? segbuf[i - 1] : -1; count += (seg != seg_prev); } return count; } // // Counts number of full and partial cache lines for a warp that accesses per warp // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ void countCacheLines(const int pos, const int n, const int cacheWidth, const int warpLane, int& cl_full, int& cl_part) { int seg = pos/cacheWidth; // Lane is at the beginning of a full cache line, if seg0 matches seg0 cacheWidth - 1 away int readLane = warpLane + (cacheWidth - 1); int val = (seg == __shfl_sync(0xffffffff,seg,readLane)); val = (readLane < n) ? 
val : 0; cl_full += val; unsigned int valbit = (((val << cacheWidth) - 1)*val) << warpLane; // Perform warpSize-way bitwise or #pragma unroll for (int i=warpSize/2;i >= 1;i/=2) { valbit |= __shfl_xor_sync(0xffffffff,valbit,i); } // Now: lanes with valbit set are part of a full cache line, // lanes with valbit unset are part of a partial cache line int full = (valbit >> warpLane) & 1; seg = (warpLane < n) ? seg : -1; int segP1 = __shfl_down_sync(0xffffffff,seg,1); segP1 = (warpLane + 1 < warpSize) ? segP1 : -1; int val2 = ((!full) && seg != segP1); cl_part += val2; } // // Counts number of full and partial cache lines for a warp that accesses // memory at cachelines segbuf[0] ... segbuf[n - 1] // __device__ __forceinline__ void countCacheLines(int* segbuf, const int n, const int cacheWidth, int& cl_full, int& cl_part) { const int topbit = (1 << 31); const int lowbits = ~(1 << 31); for (int i = threadIdx.x;i < n;i += blockDim.x) { // seg[i] is at the beginning of a full cache line, if seg[i] matches seg[i + cacheWidth - 1] int i1 = i + (cacheWidth - 1); int val = 0; if (i1 < n) val = ((segbuf[i] & lowbits) == (segbuf[i1] & lowbits)); cl_full += val; // Mark full cache lines with top bit set to 1 if (val) { for (int j=0;j < cacheWidth;j++) { if (i + j < n) segbuf[i + j] |= topbit; } } } __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int segP1 = (i + 1 < n) ? 
segbuf[i + 1] : -1; int part = ((seg & topbit) == 0); int val2 = (part && seg != segP1); cl_part += val2; } // Clear top bits __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { segbuf[i] &= lowbits; } } // // Runs countGlTransactions and countCacheLines counters for testing // Unused values in posData[] are marked with "-1" // __global__ void runCountersKernel(const int* posData, const int numPosData, const int accWidth, const int cacheWidth, int* tranData, int* cl_fullData, int* cl_partData) { const int warpLane = threadIdx.x & (warpSize - 1); for (int i=threadIdx.x + blockIdx.x*blockDim.x;i < numPosData;i+=blockDim.x*gridDim.x) { int pos = posData[i]; int flag = (pos == -1); int ffsval = __ffs(__ballot_sync(0xffffffff,flag)) - 1; int n = (__any_sync(0xffffffff,flag)) ? ffsval : warpSize; int tran = countGlTransactions(pos, n, accWidth, warpLane); int cl_full = 0; int cl_part = 0; countCacheLines(pos, n, cacheWidth, warpLane, cl_full, cl_part); #pragma unroll for (int k=warpSize/2;k >= 1;k/=2) { cl_full += __shfl_xor_sync(0xffffffff,cl_full,k); cl_part += __shfl_xor_sync(0xffffffff,cl_part,k); } int j = i / warpSize; tranData[j] = tran; cl_fullData[j] = cl_full; cl_partData[j] = cl_part; } } // // Reduce memStat within warp and write result to global memory // NOTE: Not super-efficient since every warp does atomicAdd(). 
// __device__ __forceinline__ void writeMemStat(const int warpLane, MemStat memStat, MemStat* RESTRICT glMemStat) { for (int i=16;i >= 1;i/=2) { // memStat.gld_tran += __shfl_xor_sync(0xffffffff,memStat.gld_tran,i); // memStat.gst_tran += __shfl_xor_sync(0xffffffff,memStat.gst_tran,i); // memStat.gld_req += __shfl_xor_sync(0xffffffff,memStat.gld_req,i); // memStat.gst_req += __shfl_xor_sync(0xffffffff,memStat.gst_req,i); memStat.cl_full_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l2,i); memStat.cl_part_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l2,i); memStat.cl_full_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l1,i); memStat.cl_part_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l1,i); // memStat.l1_tran += __shfl_xor_sync(0xffffffff,memStat.l1_tran,i); } if (warpLane == 0) { atomicAdd(&(glMemStat->gld_tran), memStat.gld_tran); atomicAdd(&(glMemStat->gst_tran), memStat.gst_tran); atomicAdd(&(glMemStat->gld_req), memStat.gld_req); atomicAdd(&(glMemStat->gst_req), memStat.gst_req); atomicAdd(&(glMemStat->cl_full_l2), memStat.cl_full_l2); atomicAdd(&(glMemStat->cl_part_l2), memStat.cl_part_l2); atomicAdd(&(glMemStat->cl_full_l1), memStat.cl_full_l1); atomicAdd(&(glMemStat->cl_part_l1), memStat.cl_part_l1); // atomicAdd(&(glMemStat->l1_tran), memStat.l1_tran); } } // // Transpose when Mm and Mk don't overlap and contain only single rank // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiled( const int numMm, const int volMbar, const int sizeMbar, const int2 tiledVol, const int cuDimMk, const int cuDimMm, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } 
const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int xin = bx + threadIdx.x; const int yin = by + threadIdx.y; const int xout = bx + threadIdx.y; const int yout = by + threadIdx.x; const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x); const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y); const int posMinorIn = xin + yin*cuDimMk; const int posMinorOut = yout + xout*cuDimMm; const int posInAdd = TILEROWS*cuDimMk; const int posOutAdd = TILEROWS*cuDimMm; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Compute global memory positions int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i); posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i); } int posIn = posMajorIn + posMinorIn; int posOut = posMajorOut + posMinorOut; // Read data into shared memory tile #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskIny & (1 << j))); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); posIn += posInAdd; } #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskOutx & (1 << j))); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); posOut += posOutAdd; } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed transpose. 
Thread block loads plan.volMmk number of elements // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPacked( const int volMmk, const int volMbar, const int sizeMmk, const int sizeMbar, const TensorConvInOut* RESTRICT gl_Mmk, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = gl_Mmk[warpLane]; } // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = 0; posMmkOut[j] = 0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } // 6 registers TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = 
__popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmk) shSegOut[posMmk] = posOut/cacheWidth; } __syncthreads(); countCacheLines(shSegOut, volMmk, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmk;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmk, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmk); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed method with a split rank // // dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1) // dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1) // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPackedSplit( const int splitDim, const int volMmkUnsplit, const int volMbar, const int sizeMmk, const int sizeMbar, const int cMmSplit, const int cMkSplit, const TensorConvInOut* RESTRICT glMmk, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); // const int plusone = (blockIdx.x < (splitDim % gridDim.x)); const int p0 = blockIdx.x*splitDim/gridDim.x; const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0; const 
int plusone = volSplit - splitDim/gridDim.x; TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = glMmk[warpLane + plusone*sizeMmk]; } // gridDim.x = number of splits // blockIdx.x = {0 ... gridDim.x - 1} is the split-index // Volume of this split // const int volSplit = (splitDim/gridDim.x) + plusone; // Start position in this split // const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x)); const int posMmkIn0 = p0*cMmSplit; const int posMmkOut0 = p0*cMkSplit; // Volume of split Mmk const int volMmkSplit = volSplit*volMmkUnsplit; // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = posMmkIn0; posMmkOut[j] = posMmkOut0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int t = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = 
__popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmkSplit) shSegOut[posMmk] = posOut / cacheWidth; // countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full, memStat.cl_part); } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmkSplit;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmkSplit); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Transpose when the lead dimension is the same, e.g. 
(1, 2, 3) -> (1, 3, 2) // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiledCopy( const int numMm, const int volMbar, const int sizeMbar, const int cuDimMk, const int cuDimMm, const int2 tiledVol, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int x = bx + threadIdx.x; const int y = by + threadIdx.y; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Read global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in); pos0 += x + y*cuDimMk; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMk; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gld_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } } // Write global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out); pos0 += x + y*cuDimMm; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMm; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gst_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(pos, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); } } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } 
//###################################################################################### //###################################################################################### //###################################################################################### void runCounters(const int warpSize, const int* hostPosData, const int numPosData, const int accWidth, const int cacheWidth, int* host_tran, int* host_cl_full, int* host_cl_part) { const int numWarp = numPosData/warpSize; int* devPosData; allocate_device<int>(&devPosData, numPosData); copy_HtoD<int>(hostPosData, devPosData, numPosData); int* dev_tran; int* dev_cl_full; int* dev_cl_part; allocate_device<int>(&dev_tran, numWarp); allocate_device<int>(&dev_cl_full, numWarp); allocate_device<int>(&dev_cl_part, numWarp); int nthread = 512; int nblock = (numPosData - 1)/nthread + 1; hipLaunchKernelGGL(( runCountersKernel), dim3(nblock), dim3(nthread) , 0, 0, devPosData, numPosData, accWidth, cacheWidth, dev_tran, dev_cl_full, dev_cl_part); cudaCheck(hipGetLastError()); copy_DtoH<int>(dev_tran, host_tran, numWarp); copy_DtoH<int>(dev_cl_full, host_cl_full, numWarp); copy_DtoH<int>(dev_cl_part, host_cl_part, numWarp); cudaCheck(hipDeviceSynchronize()); deallocate_device<int>(&dev_tran); deallocate_device<int>(&dev_cl_full); deallocate_device<int>(&dev_cl_part); deallocate_device<int>(&devPosData); } bool cuttGpuModelKernel(cuttPlan_t& plan, const int accWidth, const int cacheWidth, int& gld_tran, int& gst_tran, int& gld_req, int& gst_req, int& cl_full_l2, int& cl_part_l2, int& cl_full_l1, int& cl_part_l1) { LaunchConfig& lc = plan.launchConfig; TensorSplit& ts = plan.tensorSplit; MemStat* devMemStat; allocate_device<MemStat>(&devMemStat, 1); set_device_array<MemStat>(devMemStat, 0, 1, plan.stream); switch(ts.method) { case Trivial: { return false; } case Packed: { switch(lc.numRegStorage) { #define CALL0(NREG) \ hipLaunchKernelGGL(( countPacked<NREG>) , dim3(lc.numblock), dim3(lc.numthread), 
ts.volMmk*sizeof(int), plan.stream , \ ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case PackedSplit: { // Calculate max. volume of split Mmk const int volSplit = (ts.splitDim/ts.numSplit) + ((ts.splitDim % ts.numSplit) != 0); const int volMmkSplit = volSplit*ts.volMmkUnsplit; switch(lc.numRegStorage) { #define CALL0(NREG) \ hipLaunchKernelGGL(( countPackedSplit<NREG>) , dim3(lc.numblock), dim3(lc.numthread), volMmkSplit*sizeof(int), plan.stream , \ ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case Tiled: { hipLaunchKernelGGL(( countTiled) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , ((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; case TiledCopy: { hipLaunchKernelGGL(( countTiledCopy) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , ((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; } cudaCheck(hipGetLastError()); MemStat hostMemStat; copy_DtoH<MemStat>(devMemStat, &hostMemStat, 1, plan.stream); cudaCheck(hipDeviceSynchronize()); deallocate_device<MemStat>(&devMemStat); gld_tran = hostMemStat.gld_tran; gst_tran = hostMemStat.gst_tran; gld_req = hostMemStat.gld_req; gst_req = hostMemStat.gst_req; cl_full_l2 = hostMemStat.cl_full_l2; 
cl_part_l2 = hostMemStat.cl_part_l2; cl_full_l1 = hostMemStat.cl_full_l1; cl_part_l1 = hostMemStat.cl_part_l1; // l1_tran = hostMemStat.l1_tran; return true; }
df5f307f9d31f412a688c53396ff283763bd78ee.cu
/****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <cuda.h> #include "CudaUtils.h" #include "CudaMem.h" #include "cuttGpuModelKernel.h" #define RESTRICT //__restrict__ // // Global memory access statistics // struct MemStat { int gld_tran; int gst_tran; int gld_req; int gst_req; int cl_full_l2; int cl_part_l2; int cl_full_l1; int cl_part_l1; // int l1_tran; __device__ __forceinline__ void clear() { gld_tran = 0; gst_tran = 0; gld_req = 0; gst_req = 0; cl_full_l2 = 0; cl_part_l2 = 0; cl_full_l1 = 0; cl_part_l1 = 0; // l1_tran = 0; } }; // // Returns scalar tensor position. Each lane has the same p // NOTE: c and d on inactive warps must be 1 !! 
// __device__ __forceinline__ int tensorPos( const int p, const int rank, const int c, const int d, const int ct, const int numLane=warpSize ) { int r = ((p/c) % d)*ct; #pragma unroll for (int i=numLane/2;i >= 1;i/=2) { r += __shfl_xor_sync(0xffffffff,r,i); } return r; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int pos, const int n, const int accWidth, const int warpLane) { int seg0 = pos/accWidth; int srcLane = (warpLane == 0 || warpLane >= n) ? (warpLane) : (warpLane - 1); int seg1 = __shfl_sync(0xffffffff,seg0,srcLane); int count = __popc(__ballot_sync(0xffffffff,seg0 != seg1)) + 1; count = (n == 0) ? 0 : count; return count; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int* segbuf, const int n) { int count = 0; for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int seg_prev = (i - 1 >= 0) ? segbuf[i - 1] : -1; count += (seg != seg_prev); } return count; } // // Counts number of full and partial cache lines for a warp that accesses per warp // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ void countCacheLines(const int pos, const int n, const int cacheWidth, const int warpLane, int& cl_full, int& cl_part) { int seg = pos/cacheWidth; // Lane is at the beginning of a full cache line, if seg0 matches seg0 cacheWidth - 1 away int readLane = warpLane + (cacheWidth - 1); int val = (seg == __shfl_sync(0xffffffff,seg,readLane)); val = (readLane < n) ? 
val : 0; cl_full += val; unsigned int valbit = (((val << cacheWidth) - 1)*val) << warpLane; // Perform warpSize-way bitwise or #pragma unroll for (int i=warpSize/2;i >= 1;i/=2) { valbit |= __shfl_xor_sync(0xffffffff,valbit,i); } // Now: lanes with valbit set are part of a full cache line, // lanes with valbit unset are part of a partial cache line int full = (valbit >> warpLane) & 1; seg = (warpLane < n) ? seg : -1; int segP1 = __shfl_down_sync(0xffffffff,seg,1); segP1 = (warpLane + 1 < warpSize) ? segP1 : -1; int val2 = ((!full) && seg != segP1); cl_part += val2; } // // Counts number of full and partial cache lines for a warp that accesses // memory at cachelines segbuf[0] ... segbuf[n - 1] // __device__ __forceinline__ void countCacheLines(int* segbuf, const int n, const int cacheWidth, int& cl_full, int& cl_part) { const int topbit = (1 << 31); const int lowbits = ~(1 << 31); for (int i = threadIdx.x;i < n;i += blockDim.x) { // seg[i] is at the beginning of a full cache line, if seg[i] matches seg[i + cacheWidth - 1] int i1 = i + (cacheWidth - 1); int val = 0; if (i1 < n) val = ((segbuf[i] & lowbits) == (segbuf[i1] & lowbits)); cl_full += val; // Mark full cache lines with top bit set to 1 if (val) { for (int j=0;j < cacheWidth;j++) { if (i + j < n) segbuf[i + j] |= topbit; } } } __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int segP1 = (i + 1 < n) ? 
segbuf[i + 1] : -1; int part = ((seg & topbit) == 0); int val2 = (part && seg != segP1); cl_part += val2; } // Clear top bits __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { segbuf[i] &= lowbits; } } // // Runs countGlTransactions and countCacheLines counters for testing // Unused values in posData[] are marked with "-1" // __global__ void runCountersKernel(const int* posData, const int numPosData, const int accWidth, const int cacheWidth, int* tranData, int* cl_fullData, int* cl_partData) { const int warpLane = threadIdx.x & (warpSize - 1); for (int i=threadIdx.x + blockIdx.x*blockDim.x;i < numPosData;i+=blockDim.x*gridDim.x) { int pos = posData[i]; int flag = (pos == -1); int ffsval = __ffs(__ballot_sync(0xffffffff,flag)) - 1; int n = (__any_sync(0xffffffff,flag)) ? ffsval : warpSize; int tran = countGlTransactions(pos, n, accWidth, warpLane); int cl_full = 0; int cl_part = 0; countCacheLines(pos, n, cacheWidth, warpLane, cl_full, cl_part); #pragma unroll for (int k=warpSize/2;k >= 1;k/=2) { cl_full += __shfl_xor_sync(0xffffffff,cl_full,k); cl_part += __shfl_xor_sync(0xffffffff,cl_part,k); } int j = i / warpSize; tranData[j] = tran; cl_fullData[j] = cl_full; cl_partData[j] = cl_part; } } // // Reduce memStat within warp and write result to global memory // NOTE: Not super-efficient since every warp does atomicAdd(). 
// __device__ __forceinline__ void writeMemStat(const int warpLane, MemStat memStat, MemStat* RESTRICT glMemStat) { for (int i=16;i >= 1;i/=2) { // memStat.gld_tran += __shfl_xor_sync(0xffffffff,memStat.gld_tran,i); // memStat.gst_tran += __shfl_xor_sync(0xffffffff,memStat.gst_tran,i); // memStat.gld_req += __shfl_xor_sync(0xffffffff,memStat.gld_req,i); // memStat.gst_req += __shfl_xor_sync(0xffffffff,memStat.gst_req,i); memStat.cl_full_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l2,i); memStat.cl_part_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l2,i); memStat.cl_full_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l1,i); memStat.cl_part_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l1,i); // memStat.l1_tran += __shfl_xor_sync(0xffffffff,memStat.l1_tran,i); } if (warpLane == 0) { atomicAdd(&(glMemStat->gld_tran), memStat.gld_tran); atomicAdd(&(glMemStat->gst_tran), memStat.gst_tran); atomicAdd(&(glMemStat->gld_req), memStat.gld_req); atomicAdd(&(glMemStat->gst_req), memStat.gst_req); atomicAdd(&(glMemStat->cl_full_l2), memStat.cl_full_l2); atomicAdd(&(glMemStat->cl_part_l2), memStat.cl_part_l2); atomicAdd(&(glMemStat->cl_full_l1), memStat.cl_full_l1); atomicAdd(&(glMemStat->cl_part_l1), memStat.cl_part_l1); // atomicAdd(&(glMemStat->l1_tran), memStat.l1_tran); } } // // Transpose when Mm and Mk don't overlap and contain only single rank // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiled( const int numMm, const int volMbar, const int sizeMbar, const int2 tiledVol, const int cuDimMk, const int cuDimMm, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } 
const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int xin = bx + threadIdx.x; const int yin = by + threadIdx.y; const int xout = bx + threadIdx.y; const int yout = by + threadIdx.x; const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x); const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y); const int posMinorIn = xin + yin*cuDimMk; const int posMinorOut = yout + xout*cuDimMm; const int posInAdd = TILEROWS*cuDimMk; const int posOutAdd = TILEROWS*cuDimMm; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Compute global memory positions int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i); posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i); } int posIn = posMajorIn + posMinorIn; int posOut = posMajorOut + posMinorOut; // Read data into shared memory tile #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskIny & (1 << j))); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); posIn += posInAdd; } #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskOutx & (1 << j))); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); posOut += posOutAdd; } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed transpose. 
Thread block loads plan.volMmk number of elements // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPacked( const int volMmk, const int volMbar, const int sizeMmk, const int sizeMbar, const TensorConvInOut* RESTRICT gl_Mmk, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = gl_Mmk[warpLane]; } // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = 0; posMmkOut[j] = 0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } // 6 registers TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = 
__popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmk) shSegOut[posMmk] = posOut/cacheWidth; } __syncthreads(); countCacheLines(shSegOut, volMmk, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmk;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmk, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmk); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed method with a split rank // // dim nthread(((volMmkWithSplit - 1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1) // dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1) // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPackedSplit( const int splitDim, const int volMmkUnsplit, const int volMbar, const int sizeMmk, const int sizeMbar, const int cMmSplit, const int cMkSplit, const TensorConvInOut* RESTRICT glMmk, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); // const int plusone = (blockIdx.x < (splitDim % gridDim.x)); const int p0 = blockIdx.x*splitDim/gridDim.x; const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0; const 
int plusone = volSplit - splitDim/gridDim.x; TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = glMmk[warpLane + plusone*sizeMmk]; } // gridDim.x = number of splits // blockIdx.x = {0 ... gridDim.x - 1} is the split-index // Volume of this split // const int volSplit = (splitDim/gridDim.x) + plusone; // Start position in this split // const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x)); const int posMmkIn0 = p0*cMmSplit; const int posMmkOut0 = p0*cMkSplit; // Volume of split Mmk const int volMmkSplit = volSplit*volMmkUnsplit; // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = posMmkIn0; posMmkOut[j] = posMmkOut0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int t = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = 
__popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmkSplit) shSegOut[posMmk] = posOut / cacheWidth; // countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full, memStat.cl_part); } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmkSplit;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmkSplit); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Transpose when the lead dimension is the same, e.g. 
(1, 2, 3) -> (1, 3, 2) // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiledCopy( const int numMm, const int volMbar, const int sizeMbar, const int cuDimMk, const int cuDimMm, const int2 tiledVol, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int x = bx + threadIdx.x; const int y = by + threadIdx.y; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Read global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in); pos0 += x + y*cuDimMk; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMk; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gld_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } } // Write global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out); pos0 += x + y*cuDimMm; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMm; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gst_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(pos, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); } } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } 
//###################################################################################### //###################################################################################### //###################################################################################### void runCounters(const int warpSize, const int* hostPosData, const int numPosData, const int accWidth, const int cacheWidth, int* host_tran, int* host_cl_full, int* host_cl_part) { const int numWarp = numPosData/warpSize; int* devPosData; allocate_device<int>(&devPosData, numPosData); copy_HtoD<int>(hostPosData, devPosData, numPosData); int* dev_tran; int* dev_cl_full; int* dev_cl_part; allocate_device<int>(&dev_tran, numWarp); allocate_device<int>(&dev_cl_full, numWarp); allocate_device<int>(&dev_cl_part, numWarp); int nthread = 512; int nblock = (numPosData - 1)/nthread + 1; runCountersKernel<<< nblock, nthread >>>(devPosData, numPosData, accWidth, cacheWidth, dev_tran, dev_cl_full, dev_cl_part); cudaCheck(cudaGetLastError()); copy_DtoH<int>(dev_tran, host_tran, numWarp); copy_DtoH<int>(dev_cl_full, host_cl_full, numWarp); copy_DtoH<int>(dev_cl_part, host_cl_part, numWarp); cudaCheck(cudaDeviceSynchronize()); deallocate_device<int>(&dev_tran); deallocate_device<int>(&dev_cl_full); deallocate_device<int>(&dev_cl_part); deallocate_device<int>(&devPosData); } bool cuttGpuModelKernel(cuttPlan_t& plan, const int accWidth, const int cacheWidth, int& gld_tran, int& gst_tran, int& gld_req, int& gst_req, int& cl_full_l2, int& cl_part_l2, int& cl_full_l1, int& cl_part_l1) { LaunchConfig& lc = plan.launchConfig; TensorSplit& ts = plan.tensorSplit; MemStat* devMemStat; allocate_device<MemStat>(&devMemStat, 1); set_device_array<MemStat>(devMemStat, 0, 1, plan.stream); switch(ts.method) { case Trivial: { return false; } case Packed: { switch(lc.numRegStorage) { #define CALL0(NREG) \ countPacked<NREG> <<< lc.numblock, lc.numthread, ts.volMmk*sizeof(int), plan.stream >>> \ (ts.volMmk, ts.volMbar, ts.sizeMmk, 
ts.sizeMbar, \ plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case PackedSplit: { // Calculate max. volume of split Mmk const int volSplit = (ts.splitDim/ts.numSplit) + ((ts.splitDim % ts.numSplit) != 0); const int volMmkSplit = volSplit*ts.volMmkUnsplit; switch(lc.numRegStorage) { #define CALL0(NREG) \ countPackedSplit<NREG> <<< lc.numblock, lc.numthread, volMmkSplit*sizeof(int), plan.stream >>> \ (ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case Tiled: { countTiled <<< lc.numblock, lc.numthread, 0, plan.stream >>> (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; case TiledCopy: { countTiledCopy <<< lc.numblock, lc.numthread, 0, plan.stream >>> (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; } cudaCheck(cudaGetLastError()); MemStat hostMemStat; copy_DtoH<MemStat>(devMemStat, &hostMemStat, 1, plan.stream); cudaCheck(cudaDeviceSynchronize()); deallocate_device<MemStat>(&devMemStat); gld_tran = hostMemStat.gld_tran; gst_tran = hostMemStat.gst_tran; gld_req = hostMemStat.gld_req; gst_req = hostMemStat.gst_req; cl_full_l2 = hostMemStat.cl_full_l2; cl_part_l2 = hostMemStat.cl_part_l2; cl_full_l1 = hostMemStat.cl_full_l1; cl_part_l1 = hostMemStat.cl_part_l1; // l1_tran = hostMemStat.l1_tran; return true; }
48896703fda3bb9c1e1fb54f28cbfa4270dfb6cc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/device_comm_wrapper.hpp> #include <utilities/high_res_clock.h> #include <utilities/test_graphs.hpp> #include <utilities/test_utilities.hpp> #include <utilities/thrust_wrapper.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/partition_manager.hpp> #include <cugraph/utilities/dataframe_buffer.cuh> #include <cuco/detail/hash_functions.cuh> #include <cugraph/edge_partition_view.hpp> #include <cugraph/graph_view.hpp> #include <cugraph/prims/count_if_e.cuh> #include <cugraph/prims/edge_partition_src_dst_property.cuh> #include <cugraph/prims/update_edge_partition_src_dst_property.cuh> #include <raft/comms/comms.hpp> #include <raft/comms/mpi_comms.hpp> #include <raft/handle.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <thrust/count.h> #include <gtest/gtest.h> #include <random> template <typename... Args> struct property_type { using type = std::conditional_t<(sizeof...(Args) > 1), thrust::tuple<Args...>, typename thrust::tuple_element<0, thrust::tuple<Args...>>::type>; }; template <typename vertex_t, typename... 
Args> struct property_transform : public thrust::unary_function<vertex_t, typename property_type<Args...>::type> { int mod{}; property_transform(int mod_count) : mod(mod_count) {} template <typename type = typename property_type<Args...>::type> constexpr __device__ typename std::enable_if_t<cugraph::is_thrust_tuple_of_arithmetic<type>::value, type> operator()(const vertex_t& val) { cuco::detail::MurmurHash3_32<vertex_t> hash_func{}; auto value = hash_func(val) % mod; return thrust::make_tuple(static_cast<Args>(value)...); } template <typename type = typename property_type<Args...>::type> constexpr __device__ typename std::enable_if_t<std::is_arithmetic<type>::value, type> operator()( const vertex_t& val) { cuco::detail::MurmurHash3_32<vertex_t> hash_func{}; auto value = hash_func(val) % mod; return static_cast<type>(value); } }; template <typename vertex_t, template <typename...> typename Tuple, typename... Args> struct property_transform<vertex_t, Tuple<Args...>> : public property_transform<vertex_t, Args...> { }; template <typename... 
Args> struct generate_impl { private: using type = typename property_type<Args...>::type; using property_buffer_type = std::conditional_t< (sizeof...(Args) > 1), std::tuple<rmm::device_uvector<Args>...>, rmm::device_uvector<typename thrust::tuple_element<0, thrust::tuple<Args...>>::type>>; public: static thrust::tuple<Args...> initial_value(int init) { return thrust::make_tuple(static_cast<Args>(init)...); } template <typename label_t> static auto vertex_property(rmm::device_uvector<label_t>& labels, int hash_bin_count, raft::handle_t const& handle) { auto data = cugraph::allocate_dataframe_buffer<type>(labels.size(), handle.get_stream()); auto zip = cugraph::get_dataframe_buffer_begin(data); thrust::transform(handle.get_thrust_policy(), labels.begin(), labels.end(), zip, property_transform<label_t, Args...>(hash_bin_count)); return data; } template <typename label_t> static auto vertex_property(thrust::counting_iterator<label_t> begin, thrust::counting_iterator<label_t> end, int hash_bin_count, raft::handle_t const& handle) { auto length = thrust::distance(begin, end); auto data = cugraph::allocate_dataframe_buffer<type>(length, handle.get_stream()); auto zip = cugraph::get_dataframe_buffer_begin(data); thrust::transform(handle.get_thrust_policy(), begin, end, zip, property_transform<label_t, Args...>(hash_bin_count)); return data; } template <typename graph_view_type> static auto column_property(raft::handle_t const& handle, graph_view_type const& graph_view, property_buffer_type& property) { auto output_property = cugraph::edge_partition_dst_property_t<graph_view_type, type>(handle, graph_view); update_edge_partition_dst_property( handle, graph_view, cugraph::get_dataframe_buffer_begin(property), output_property); return output_property; } template <typename graph_view_type> static auto row_property(raft::handle_t const& handle, graph_view_type const& graph_view, property_buffer_type& property) { auto output_property = 
cugraph::edge_partition_src_property_t<graph_view_type, type>(handle, graph_view); update_edge_partition_src_property( handle, graph_view, cugraph::get_dataframe_buffer_begin(property), output_property); return output_property; } }; template <typename T> struct generate : public generate_impl<T> { static T initial_value(int init) { return static_cast<T>(init); } }; template <typename... Args> struct generate<std::tuple<Args...>> : public generate_impl<Args...> { }; struct Prims_Usecase { bool check_correctness{true}; bool test_weighted{false}; }; template <typename input_usecase_t> class Tests_MG_TransformCountIfE : public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> { public: Tests_MG_TransformCountIfE() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} // Verify the results of count_if_e primitive template <typename vertex_t, typename edge_t, typename weight_t, typename result_t, bool store_transposed> void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase) { // 1. initialize handle raft::handle_t handle{}; HighResClock hr_clock{}; raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); auto& comm = handle.get_comms(); auto const comm_size = comm.get_size(); auto const comm_rank = comm.get_rank(); auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size))); while (comm_size % row_comm_size != 0) { --row_comm_size; } cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t> subcomm_factory(handle, row_comm_size); // 2. 
create MG graph if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); hr_clock.start(); } auto [mg_graph, d_mg_renumber_map_labels] = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>( handle, input_usecase, prims_usecase.test_weighted, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); double elapsed_time{0.0}; hr_clock.stop(&elapsed_time); std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n"; } auto mg_graph_view = mg_graph.view(); // 3. run MG count_if_e const int hash_bin_count = 5; auto vertex_property_data = generate<result_t>::vertex_property((*d_mg_renumber_map_labels), hash_bin_count, handle); auto col_prop = generate<result_t>::column_property(handle, mg_graph_view, vertex_property_data); auto row_prop = generate<result_t>::row_property(handle, mg_graph_view, vertex_property_data); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); hr_clock.start(); } auto result = count_if_e( handle, mg_graph_view, row_prop.device_view(), col_prop.device_view(), [] __device__(auto row, auto col, weight_t wt, auto row_property, auto col_property) { return row_property < col_property; }); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); double elapsed_time{0.0}; hr_clock.stop(&elapsed_time); std::cout << "MG count if e took " << elapsed_time * 1e-6 << " s.\n"; } //// 4. 
compare SG & MG results if (prims_usecase.check_correctness) { cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(handle); std::tie(sg_graph, std::ignore) = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>( handle, input_usecase, prims_usecase.test_weighted, false); auto sg_graph_view = sg_graph.view(); auto sg_vertex_property_data = generate<result_t>::vertex_property( thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()), thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()), hash_bin_count, handle); auto sg_col_prop = generate<result_t>::column_property(handle, sg_graph_view, sg_vertex_property_data); auto sg_row_prop = generate<result_t>::row_property(handle, sg_graph_view, sg_vertex_property_data); auto expected_result = count_if_e( handle, sg_graph_view, sg_row_prop.device_view(), sg_col_prop.device_view(), [] __device__(auto row, auto col, weight_t wt, auto row_property, auto col_property) { return row_property < col_property; }); ASSERT_TRUE(expected_result == result); } } }; using Tests_MG_TransformCountIfE_File = Tests_MG_TransformCountIfE<cugraph::test::File_Usecase>; using Tests_MG_TransformCountIfE_Rmat = Tests_MG_TransformCountIfE<cugraph::test::Rmat_Usecase>; TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, 
std::tuple<int, float>, true>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, false>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, true>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_MG_TransformCountIfE_File, ::testing::Combine( ::testing::Values(Prims_Usecase{true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_MG_TransformCountIfE_Rmat, ::testing::Combine(::testing::Values(Prims_Usecase{true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); INSTANTIATE_TEST_SUITE_P( rmat_large_test, Tests_MG_TransformCountIfE_Rmat, 
::testing::Combine(::testing::Values(Prims_Usecase{false}), ::testing::Values(cugraph::test::Rmat_Usecase( 20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); CUGRAPH_MG_TEST_PROGRAM_MAIN()
48896703fda3bb9c1e1fb54f28cbfa4270dfb6cc.cu
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/device_comm_wrapper.hpp> #include <utilities/high_res_clock.h> #include <utilities/test_graphs.hpp> #include <utilities/test_utilities.hpp> #include <utilities/thrust_wrapper.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/partition_manager.hpp> #include <cugraph/utilities/dataframe_buffer.cuh> #include <cuco/detail/hash_functions.cuh> #include <cugraph/edge_partition_view.hpp> #include <cugraph/graph_view.hpp> #include <cugraph/prims/count_if_e.cuh> #include <cugraph/prims/edge_partition_src_dst_property.cuh> #include <cugraph/prims/update_edge_partition_src_dst_property.cuh> #include <raft/comms/comms.hpp> #include <raft/comms/mpi_comms.hpp> #include <raft/handle.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <thrust/count.h> #include <gtest/gtest.h> #include <random> template <typename... Args> struct property_type { using type = std::conditional_t<(sizeof...(Args) > 1), thrust::tuple<Args...>, typename thrust::tuple_element<0, thrust::tuple<Args...>>::type>; }; template <typename vertex_t, typename... 
Args> struct property_transform : public thrust::unary_function<vertex_t, typename property_type<Args...>::type> { int mod{}; property_transform(int mod_count) : mod(mod_count) {} template <typename type = typename property_type<Args...>::type> constexpr __device__ typename std::enable_if_t<cugraph::is_thrust_tuple_of_arithmetic<type>::value, type> operator()(const vertex_t& val) { cuco::detail::MurmurHash3_32<vertex_t> hash_func{}; auto value = hash_func(val) % mod; return thrust::make_tuple(static_cast<Args>(value)...); } template <typename type = typename property_type<Args...>::type> constexpr __device__ typename std::enable_if_t<std::is_arithmetic<type>::value, type> operator()( const vertex_t& val) { cuco::detail::MurmurHash3_32<vertex_t> hash_func{}; auto value = hash_func(val) % mod; return static_cast<type>(value); } }; template <typename vertex_t, template <typename...> typename Tuple, typename... Args> struct property_transform<vertex_t, Tuple<Args...>> : public property_transform<vertex_t, Args...> { }; template <typename... 
Args> struct generate_impl { private: using type = typename property_type<Args...>::type; using property_buffer_type = std::conditional_t< (sizeof...(Args) > 1), std::tuple<rmm::device_uvector<Args>...>, rmm::device_uvector<typename thrust::tuple_element<0, thrust::tuple<Args...>>::type>>; public: static thrust::tuple<Args...> initial_value(int init) { return thrust::make_tuple(static_cast<Args>(init)...); } template <typename label_t> static auto vertex_property(rmm::device_uvector<label_t>& labels, int hash_bin_count, raft::handle_t const& handle) { auto data = cugraph::allocate_dataframe_buffer<type>(labels.size(), handle.get_stream()); auto zip = cugraph::get_dataframe_buffer_begin(data); thrust::transform(handle.get_thrust_policy(), labels.begin(), labels.end(), zip, property_transform<label_t, Args...>(hash_bin_count)); return data; } template <typename label_t> static auto vertex_property(thrust::counting_iterator<label_t> begin, thrust::counting_iterator<label_t> end, int hash_bin_count, raft::handle_t const& handle) { auto length = thrust::distance(begin, end); auto data = cugraph::allocate_dataframe_buffer<type>(length, handle.get_stream()); auto zip = cugraph::get_dataframe_buffer_begin(data); thrust::transform(handle.get_thrust_policy(), begin, end, zip, property_transform<label_t, Args...>(hash_bin_count)); return data; } template <typename graph_view_type> static auto column_property(raft::handle_t const& handle, graph_view_type const& graph_view, property_buffer_type& property) { auto output_property = cugraph::edge_partition_dst_property_t<graph_view_type, type>(handle, graph_view); update_edge_partition_dst_property( handle, graph_view, cugraph::get_dataframe_buffer_begin(property), output_property); return output_property; } template <typename graph_view_type> static auto row_property(raft::handle_t const& handle, graph_view_type const& graph_view, property_buffer_type& property) { auto output_property = 
cugraph::edge_partition_src_property_t<graph_view_type, type>(handle, graph_view); update_edge_partition_src_property( handle, graph_view, cugraph::get_dataframe_buffer_begin(property), output_property); return output_property; } }; template <typename T> struct generate : public generate_impl<T> { static T initial_value(int init) { return static_cast<T>(init); } }; template <typename... Args> struct generate<std::tuple<Args...>> : public generate_impl<Args...> { }; struct Prims_Usecase { bool check_correctness{true}; bool test_weighted{false}; }; template <typename input_usecase_t> class Tests_MG_TransformCountIfE : public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> { public: Tests_MG_TransformCountIfE() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} // Verify the results of count_if_e primitive template <typename vertex_t, typename edge_t, typename weight_t, typename result_t, bool store_transposed> void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase) { // 1. initialize handle raft::handle_t handle{}; HighResClock hr_clock{}; raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); auto& comm = handle.get_comms(); auto const comm_size = comm.get_size(); auto const comm_rank = comm.get_rank(); auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size))); while (comm_size % row_comm_size != 0) { --row_comm_size; } cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t> subcomm_factory(handle, row_comm_size); // 2. 
create MG graph if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); hr_clock.start(); } auto [mg_graph, d_mg_renumber_map_labels] = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>( handle, input_usecase, prims_usecase.test_weighted, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); double elapsed_time{0.0}; hr_clock.stop(&elapsed_time); std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n"; } auto mg_graph_view = mg_graph.view(); // 3. run MG count_if_e const int hash_bin_count = 5; auto vertex_property_data = generate<result_t>::vertex_property((*d_mg_renumber_map_labels), hash_bin_count, handle); auto col_prop = generate<result_t>::column_property(handle, mg_graph_view, vertex_property_data); auto row_prop = generate<result_t>::row_property(handle, mg_graph_view, vertex_property_data); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); hr_clock.start(); } auto result = count_if_e( handle, mg_graph_view, row_prop.device_view(), col_prop.device_view(), [] __device__(auto row, auto col, weight_t wt, auto row_property, auto col_property) { return row_property < col_property; }); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle.get_comms().barrier(); double elapsed_time{0.0}; hr_clock.stop(&elapsed_time); std::cout << "MG count if e took " << elapsed_time * 1e-6 << " s.\n"; } //// 4. 
compare SG & MG results if (prims_usecase.check_correctness) { cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(handle); std::tie(sg_graph, std::ignore) = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>( handle, input_usecase, prims_usecase.test_weighted, false); auto sg_graph_view = sg_graph.view(); auto sg_vertex_property_data = generate<result_t>::vertex_property( thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()), thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()), hash_bin_count, handle); auto sg_col_prop = generate<result_t>::column_property(handle, sg_graph_view, sg_vertex_property_data); auto sg_row_prop = generate<result_t>::row_property(handle, sg_graph_view, sg_vertex_property_data); auto expected_result = count_if_e( handle, sg_graph_view, sg_row_prop.device_view(), sg_col_prop.device_view(), [] __device__(auto row, auto col, weight_t wt, auto row_property, auto col_property) { return row_property < col_property; }); ASSERT_TRUE(expected_result == result); } } }; using Tests_MG_TransformCountIfE_File = Tests_MG_TransformCountIfE<cugraph::test::File_Usecase>; using Tests_MG_TransformCountIfE_Rmat = Tests_MG_TransformCountIfE<cugraph::test::Rmat_Usecase>; TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, 
std::tuple<int, float>, true>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTransposeFalse) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, false>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MG_TransformCountIfE_File, CheckInt32Int32FloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MG_TransformCountIfE_Rmat, CheckInt32Int32FloatTransposeTrue) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int, true>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_MG_TransformCountIfE_File, ::testing::Combine( ::testing::Values(Prims_Usecase{true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_MG_TransformCountIfE_Rmat, ::testing::Combine(::testing::Values(Prims_Usecase{true}), ::testing::Values(cugraph::test::Rmat_Usecase( 10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); INSTANTIATE_TEST_SUITE_P( rmat_large_test, Tests_MG_TransformCountIfE_Rmat, 
::testing::Combine(::testing::Values(Prims_Usecase{false}), ::testing::Values(cugraph::test::Rmat_Usecase( 20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); CUGRAPH_MG_TEST_PROGRAM_MAIN()
a25dd7fd95368ac8094d894af9941866c1897522.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> #include <cusolverDn.h> #include <iostream> #include "cuutils.h" #include "absnf.h" #include "utils.hpp" #include <chrono> #define t_def double typedef std::chrono::high_resolution_clock::time_point TimeVar; void test(int s) { t_def *h_a = (t_def *)malloc(s*sizeof(t_def)); t_def *h_b = (t_def *)malloc(s*sizeof(t_def)); t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_dz = (t_def *)malloc(s*sizeof(t_def)); t_def *d_a; hipMalloc((void **)&d_a, s*sizeof(t_def)); t_def *d_b; hipMalloc((void **)&d_b, s*sizeof(t_def)); t_def *d_Z; hipMalloc((void **)&d_Z, s*s*sizeof(t_def)); t_def *d_L; hipMalloc((void **)&d_L, s*s*sizeof(t_def)); t_def *d_J; hipMalloc((void **)&d_J, s*s*sizeof(t_def)); t_def *d_Y; hipMalloc((void **)&d_Y, s*s*sizeof(t_def)); t_def *d_dz; hipMalloc((void **)&d_dz, s*sizeof(t_def)); t_def *d_gamma; hipMalloc((void **)&d_gamma, s*sizeof(t_def)); t_def *d_Gamma; hipMalloc((void **)&d_Gamma, s*s*sizeof(t_def)); t_def *d_Tss; hipMalloc((void **)&d_Tss, s*s*sizeof(t_def)); t_def *d_I; hipMalloc((void **)&d_I, s*s*sizeof(t_def)); t_def *d_K; hipMalloc((void **)&d_K, s*s*sizeof(t_def)); utils::fillRandVector(h_a, s,-10,10); utils::fillRandVector(h_b, s,-10,10); utils::fillRandVector(h_Z, s*s,-10,10); utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER); utils::fillRandVector(h_J, s*s,-10,10); utils::fillRandVector(h_Y, s*s,-10,10); utils::fillRandVector(h_dz, s,-10,10); hipblasHandle_t cublas_handle; hipblasCreate(&cublas_handle); TimeVar t_0 = std::chrono::high_resolution_clock::now(); hipMemcpy(d_a, h_a, s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_Z, h_Z, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_L, h_L, s*s*sizeof(t_def), 
hipMemcpyHostToDevice); hipMemcpy(d_J, h_J, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_Y, h_Y, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_dz, h_dz, s*sizeof(t_def), hipMemcpyHostToDevice); hipDeviceSynchronize(); TimeVar t_1 = std::chrono::high_resolution_clock::now(); auto time_copy = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); int gridsize, blocksize; cuutils::getGridBlockSize(&gridsize, &blocksize); t_0 = std::chrono::high_resolution_clock::now(); absnf::gradient_core(cublas_handle, d_a, d_b, d_Z, d_L, d_J, d_Y, d_dz, d_Tss, d_I, d_K, s, s, s, gridsize, blocksize, d_gamma, d_Gamma); hipDeviceSynchronize(); t_1 = std::chrono::high_resolution_clock::now(); auto time_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // TimeVar t_3 = std::chrono::high_resolution_clock::now(); // auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count(); // auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count(); std::cout << "---------------" << std::endl; std::cout << "s=m=n: " << s << std::endl; std::cout << "mem: " << time_copy << std::endl; std::cout << "exec: " << time_exec << std::endl; free(h_a); free(h_b); free(h_Z); free(h_L); free(h_J); free(h_Y); free(h_dz); hipFree(d_a); hipFree(d_b); hipFree(d_Z); hipFree(d_L); hipFree(d_J); hipFree(d_Y); hipFree(d_dz); hipFree(d_Tss); hipFree(d_I); hipFree(d_K); hipFree(d_gamma); hipFree(d_Gamma); hipblasDestroy(cublas_handle); } void test_1000(int s) { t_def *h_a = (t_def *)malloc(s*sizeof(t_def)); t_def *h_b = (t_def *)malloc(s*sizeof(t_def)); t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_dz = (t_def 
*)malloc(s*sizeof(t_def)); t_def *d_a; hipMalloc((void **)&d_a, s*sizeof(t_def)); t_def *d_b; hipMalloc((void **)&d_b, s*sizeof(t_def)); t_def *d_Z; hipMalloc((void **)&d_Z, s*s*sizeof(t_def)); t_def *d_L; hipMalloc((void **)&d_L, s*s*sizeof(t_def)); t_def *d_J; hipMalloc((void **)&d_J, s*s*sizeof(t_def)); t_def *d_Y; hipMalloc((void **)&d_Y, s*s*sizeof(t_def)); t_def *d_dz; hipMalloc((void **)&d_dz, s*sizeof(t_def)); t_def *d_gamma; hipMalloc((void **)&d_gamma, s*sizeof(t_def)); t_def *d_Gamma; hipMalloc((void **)&d_Gamma, s*s*sizeof(t_def)); t_def *d_Tss; hipMalloc((void **)&d_Tss, s*s*sizeof(t_def)); t_def *d_I; hipMalloc((void **)&d_I, s*s*sizeof(t_def)); t_def *d_K; hipMalloc((void **)&d_K, s*s*sizeof(t_def)); utils::fillRandVector(h_a, s,-10,10); utils::fillRandVector(h_b, s,-10,10); utils::fillRandVector(h_Z, s*s,-10,10); utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER); utils::fillRandVector(h_J, s*s,-10,10); utils::fillRandVector(h_Y, s*s,-10,10); utils::fillRandVector(h_dz, s,-10,10); hipblasHandle_t cublas_handle; hipblasCreate(&cublas_handle); hipMemcpy(d_a, h_a, s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_Z, h_Z, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_L, h_L, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_J, h_J, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_Y, h_Y, s*s*sizeof(t_def), hipMemcpyHostToDevice); hipMemcpy(d_dz, h_dz, s*sizeof(t_def), hipMemcpyHostToDevice); hipDeviceSynchronize(); int gridsize, blocksize; cuutils::getGridBlockSize(&gridsize, &blocksize); TimeVar t_0 = std::chrono::high_resolution_clock::now(); for(int i=0; i<100; i++) { absnf::gradient_core(cublas_handle, d_a, d_b, d_Z, d_L, d_J, d_Y, d_dz, d_Tss, d_I, d_K, s, s, s, gridsize, blocksize, d_gamma, d_Gamma); } hipDeviceSynchronize(); TimeVar t_1 = std::chrono::high_resolution_clock::now(); auto time_exec = 
std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // TimeVar t_3 = std::chrono::high_resolution_clock::now(); // auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count(); // auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count(); std::cout << "----1000--------" << std::endl; std::cout << "s=m=n: " << s << std::endl; std::cout << "exec: " << time_exec << std::endl; free(h_a); free(h_b); free(h_Z); free(h_L); free(h_J); free(h_Y); free(h_dz); hipFree(d_a); hipFree(d_b); hipFree(d_Z); hipFree(d_L); hipFree(d_J); hipFree(d_Y); hipFree(d_dz); hipFree(d_Tss); hipFree(d_I); hipFree(d_K); hipFree(d_gamma); hipFree(d_Gamma); hipblasDestroy(cublas_handle); } void test_gridsize(int s) { } int main() { for (int i=1000; i<=7000; i+=1000) { test(i); } return 0; }
a25dd7fd95368ac8094d894af9941866c1897522.cu
#include <cublas_v2.h> #include <cusolverDn.h> #include <iostream> #include "cuutils.h" #include "absnf.h" #include "utils.hpp" #include <chrono> #define t_def double typedef std::chrono::high_resolution_clock::time_point TimeVar; void test(int s) { t_def *h_a = (t_def *)malloc(s*sizeof(t_def)); t_def *h_b = (t_def *)malloc(s*sizeof(t_def)); t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_dz = (t_def *)malloc(s*sizeof(t_def)); t_def *d_a; cudaMalloc((void **)&d_a, s*sizeof(t_def)); t_def *d_b; cudaMalloc((void **)&d_b, s*sizeof(t_def)); t_def *d_Z; cudaMalloc((void **)&d_Z, s*s*sizeof(t_def)); t_def *d_L; cudaMalloc((void **)&d_L, s*s*sizeof(t_def)); t_def *d_J; cudaMalloc((void **)&d_J, s*s*sizeof(t_def)); t_def *d_Y; cudaMalloc((void **)&d_Y, s*s*sizeof(t_def)); t_def *d_dz; cudaMalloc((void **)&d_dz, s*sizeof(t_def)); t_def *d_gamma; cudaMalloc((void **)&d_gamma, s*sizeof(t_def)); t_def *d_Gamma; cudaMalloc((void **)&d_Gamma, s*s*sizeof(t_def)); t_def *d_Tss; cudaMalloc((void **)&d_Tss, s*s*sizeof(t_def)); t_def *d_I; cudaMalloc((void **)&d_I, s*s*sizeof(t_def)); t_def *d_K; cudaMalloc((void **)&d_K, s*s*sizeof(t_def)); utils::fillRandVector(h_a, s,-10,10); utils::fillRandVector(h_b, s,-10,10); utils::fillRandVector(h_Z, s*s,-10,10); utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER); utils::fillRandVector(h_J, s*s,-10,10); utils::fillRandVector(h_Y, s*s,-10,10); utils::fillRandVector(h_dz, s,-10,10); cublasHandle_t cublas_handle; cublasCreate(&cublas_handle); TimeVar t_0 = std::chrono::high_resolution_clock::now(); cudaMemcpy(d_a, h_a, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_Z, h_Z, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_L, h_L, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_J, h_J, 
s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, h_Y, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_dz, h_dz, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); TimeVar t_1 = std::chrono::high_resolution_clock::now(); auto time_copy = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); int gridsize, blocksize; cuutils::getGridBlockSize(&gridsize, &blocksize); t_0 = std::chrono::high_resolution_clock::now(); absnf::gradient_core(cublas_handle, d_a, d_b, d_Z, d_L, d_J, d_Y, d_dz, d_Tss, d_I, d_K, s, s, s, gridsize, blocksize, d_gamma, d_Gamma); cudaDeviceSynchronize(); t_1 = std::chrono::high_resolution_clock::now(); auto time_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // TimeVar t_3 = std::chrono::high_resolution_clock::now(); // auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count(); // auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count(); std::cout << "---------------" << std::endl; std::cout << "s=m=n: " << s << std::endl; std::cout << "mem: " << time_copy << std::endl; std::cout << "exec: " << time_exec << std::endl; free(h_a); free(h_b); free(h_Z); free(h_L); free(h_J); free(h_Y); free(h_dz); cudaFree(d_a); cudaFree(d_b); cudaFree(d_Z); cudaFree(d_L); cudaFree(d_J); cudaFree(d_Y); cudaFree(d_dz); cudaFree(d_Tss); cudaFree(d_I); cudaFree(d_K); cudaFree(d_gamma); cudaFree(d_Gamma); cublasDestroy(cublas_handle); } void test_1000(int s) { t_def *h_a = (t_def *)malloc(s*sizeof(t_def)); t_def *h_b = (t_def *)malloc(s*sizeof(t_def)); t_def *h_Z = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_L = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_J = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_Y = (t_def *)malloc(s*s*sizeof(t_def)); t_def *h_dz = (t_def *)malloc(s*sizeof(t_def)); t_def *d_a; 
cudaMalloc((void **)&d_a, s*sizeof(t_def)); t_def *d_b; cudaMalloc((void **)&d_b, s*sizeof(t_def)); t_def *d_Z; cudaMalloc((void **)&d_Z, s*s*sizeof(t_def)); t_def *d_L; cudaMalloc((void **)&d_L, s*s*sizeof(t_def)); t_def *d_J; cudaMalloc((void **)&d_J, s*s*sizeof(t_def)); t_def *d_Y; cudaMalloc((void **)&d_Y, s*s*sizeof(t_def)); t_def *d_dz; cudaMalloc((void **)&d_dz, s*sizeof(t_def)); t_def *d_gamma; cudaMalloc((void **)&d_gamma, s*sizeof(t_def)); t_def *d_Gamma; cudaMalloc((void **)&d_Gamma, s*s*sizeof(t_def)); t_def *d_Tss; cudaMalloc((void **)&d_Tss, s*s*sizeof(t_def)); t_def *d_I; cudaMalloc((void **)&d_I, s*s*sizeof(t_def)); t_def *d_K; cudaMalloc((void **)&d_K, s*s*sizeof(t_def)); utils::fillRandVector(h_a, s,-10,10); utils::fillRandVector(h_b, s,-10,10); utils::fillRandVector(h_Z, s*s,-10,10); utils::fillRandMatrix(h_L, s,s,-10,10,0,utils::MATRIXOPT::LOWER); utils::fillRandVector(h_J, s*s,-10,10); utils::fillRandVector(h_Y, s*s,-10,10); utils::fillRandVector(h_dz, s,-10,10); cublasHandle_t cublas_handle; cublasCreate(&cublas_handle); cudaMemcpy(d_a, h_a, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_Z, h_Z, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_L, h_L, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_J, h_J, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, h_Y, s*s*sizeof(t_def), cudaMemcpyHostToDevice); cudaMemcpy(d_dz, h_dz, s*sizeof(t_def), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); int gridsize, blocksize; cuutils::getGridBlockSize(&gridsize, &blocksize); TimeVar t_0 = std::chrono::high_resolution_clock::now(); for(int i=0; i<100; i++) { absnf::gradient_core(cublas_handle, d_a, d_b, d_Z, d_L, d_J, d_Y, d_dz, d_Tss, d_I, d_K, s, s, s, gridsize, blocksize, d_gamma, d_Gamma); } cudaDeviceSynchronize(); TimeVar t_1 = std::chrono::high_resolution_clock::now(); auto time_exec = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 
).count(); // TimeVar t_3 = std::chrono::high_resolution_clock::now(); // auto int_upload = std::chrono::duration_cast<std::chrono::milliseconds>( t_1 - t_0 ).count(); // auto int_download = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_2 ).count(); // auto int_total = std::chrono::duration_cast<std::chrono::milliseconds>( t_3 - t_0 ).count(); std::cout << "----1000--------" << std::endl; std::cout << "s=m=n: " << s << std::endl; std::cout << "exec: " << time_exec << std::endl; free(h_a); free(h_b); free(h_Z); free(h_L); free(h_J); free(h_Y); free(h_dz); cudaFree(d_a); cudaFree(d_b); cudaFree(d_Z); cudaFree(d_L); cudaFree(d_J); cudaFree(d_Y); cudaFree(d_dz); cudaFree(d_Tss); cudaFree(d_I); cudaFree(d_K); cudaFree(d_gamma); cudaFree(d_Gamma); cublasDestroy(cublas_handle); } void test_gridsize(int s) { } int main() { for (int i=1000; i<=7000; i+=1000) { test(i); } return 0; }
d6a3d555ef80469a6ed8a1aab5e37d15ddb10085.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cstdio> #include <ctime> #include "util/cuda/debug.h" // catches all CUDA errors #include "phantom.h" namespace PET2D { namespace Barrel { namespace GPU { void run_gpu_phantom(int n_threads_per_block, int n_blocks, int n_emissions, int n_pixels_in_row, F s_size, Pixel* lookup_table_pixel, int* pixel_hits) { hipSetDevice(1); #if PHANTOM_IS_BROKEN_FOR_NOW dim3 blocks(n_blocks); dim3 threads(n_threads_per_block); unsigned int* cpu_prng_seed; cpu_prng_seed = new unsigned int[n_blocks * n_threads_per_block * 4]; for (int i = 0; i < 4 * n_blocks * n_threads_per_block; ++i) { cpu_prng_seed[i] = 53445 + i; } int triangular_matrix_size = ((n_pixels_in_row / 2) * ((n_pixels_in_row / 2) + 1) / 2); for (int i = 0; i < triangular_matrix_size; ++i) { for (int lor = 0; lor < LORS; ++lor) { gpu_output[i].hit[lor] = 0; } } MatrixElement* cpu_matrix = new MatrixElement[n_blocks]; unsigned int* gpu_prng_seed; MatrixElement* gpu_MatrixElement; hipMalloc((void**)&gpu_prng_seed, n_blocks * n_threads_per_block * 4 * sizeof(unsigned int)); hipMalloc((void**)&gpu_MatrixElement, n_blocks * sizeof(MatrixElement)); hipMemcpy(gpu_prng_seed, cpu_prng_seed, n_blocks * n_threads_per_block * 4 * sizeof(unsigned int), hipMemcpyHostToDevice); printf("GPU kernel start\n"); printf("DETECTORS %d LORS: %d\n", NUMBER_OF_DETECTORS, LORS); for (int p = 0; p < triangular_matrix_size; ++p) { Pixel pixel = lookup_table_pixel[p]; int i = pixel.x; int j = pixel.y; #if BROKEN mem_clean_lors(cpu_matrix, number_of_blocks); #endif hipMemcpy(gpu_MatrixElement, cpu_matrix, n_blocks * sizeof(MatrixElement), hipMemcpyHostToDevice); long total_emissions = (long)n_emissions * n_blocks * n_threads_per_block; printf("Pixel(%d,%d) n_emissions: %d %ld\n", i, j, n_emissions, total_emissions); F fov_radius = radius / M_SQRT2; if ((i * i + j * j) * s_size * s_size < fov_radius * fov_radius) { #if __HIPCC__ #define 
gpu_phantom_generationhipLaunchKernelGGL(( gpu_phantom_generation), dim3(blocks), dim3(threads), 0, 0, #endif #if WHERE_IS_PHANTOM ( gpu_phantom_generation)i, j, n_emissions, gpu_prng_seed, gpu_MatrixElement, number_of_threads_per_block, pixels_in_row, radius, h_detector, w_detector, pixel_size); #endif hipPeekAtLastError(); // ensure kernel was run successfully hipDeviceSynchronize(); } hipMemcpy(cpu_matrix, gpu_MatrixElement, n_blocks * sizeof(MatrixElement), hipMemcpyDeviceToHost); if (p == 0) { for (int i = 0; i < LORS; i++) { F temp = 0.f; for (int j = 0; j < n_blocks; ++j) { temp += cpu_matrix[j].hit[i]; } #if BROKEN if (temp > 0.0f) { GPU::LOR lor(lookup_table_lors[i].lor_a, lookup_table_lors[i].lor_b); gpu_output[p].hit[lor.index()] = temp; } #endif } } } hipFree(gpu_prng_seed); hipFree(gpu_MatrixElement); #endif } } // GPU } // Barrel } // PET2D
d6a3d555ef80469a6ed8a1aab5e37d15ddb10085.cu
#include <cuda_runtime.h> #include <cstdio> #include <ctime> #include "util/cuda/debug.h" // catches all CUDA errors #include "phantom.h" namespace PET2D { namespace Barrel { namespace GPU { void run_gpu_phantom(int n_threads_per_block, int n_blocks, int n_emissions, int n_pixels_in_row, F s_size, Pixel* lookup_table_pixel, int* pixel_hits) { cudaSetDevice(1); #if PHANTOM_IS_BROKEN_FOR_NOW dim3 blocks(n_blocks); dim3 threads(n_threads_per_block); unsigned int* cpu_prng_seed; cpu_prng_seed = new unsigned int[n_blocks * n_threads_per_block * 4]; for (int i = 0; i < 4 * n_blocks * n_threads_per_block; ++i) { cpu_prng_seed[i] = 53445 + i; } int triangular_matrix_size = ((n_pixels_in_row / 2) * ((n_pixels_in_row / 2) + 1) / 2); for (int i = 0; i < triangular_matrix_size; ++i) { for (int lor = 0; lor < LORS; ++lor) { gpu_output[i].hit[lor] = 0; } } MatrixElement* cpu_matrix = new MatrixElement[n_blocks]; unsigned int* gpu_prng_seed; MatrixElement* gpu_MatrixElement; cudaMalloc((void**)&gpu_prng_seed, n_blocks * n_threads_per_block * 4 * sizeof(unsigned int)); cudaMalloc((void**)&gpu_MatrixElement, n_blocks * sizeof(MatrixElement)); cudaMemcpy(gpu_prng_seed, cpu_prng_seed, n_blocks * n_threads_per_block * 4 * sizeof(unsigned int), cudaMemcpyHostToDevice); printf("GPU kernel start\n"); printf("DETECTORS %d LORS: %d\n", NUMBER_OF_DETECTORS, LORS); for (int p = 0; p < triangular_matrix_size; ++p) { Pixel pixel = lookup_table_pixel[p]; int i = pixel.x; int j = pixel.y; #if BROKEN mem_clean_lors(cpu_matrix, number_of_blocks); #endif cudaMemcpy(gpu_MatrixElement, cpu_matrix, n_blocks * sizeof(MatrixElement), cudaMemcpyHostToDevice); long total_emissions = (long)n_emissions * n_blocks * n_threads_per_block; printf("Pixel(%d,%d) n_emissions: %d %ld\n", i, j, n_emissions, total_emissions); F fov_radius = radius / M_SQRT2; if ((i * i + j * j) * s_size * s_size < fov_radius * fov_radius) { #if __CUDACC__ #define gpu_phantom_generation gpu_phantom_generation<<<blocks, threads>>> 
#endif #if WHERE_IS_PHANTOM gpu_phantom_generation(i, j, n_emissions, gpu_prng_seed, gpu_MatrixElement, number_of_threads_per_block, pixels_in_row, radius, h_detector, w_detector, pixel_size); #endif cudaPeekAtLastError(); // ensure kernel was run successfully cudaThreadSynchronize(); } cudaMemcpy(cpu_matrix, gpu_MatrixElement, n_blocks * sizeof(MatrixElement), cudaMemcpyDeviceToHost); if (p == 0) { for (int i = 0; i < LORS; i++) { F temp = 0.f; for (int j = 0; j < n_blocks; ++j) { temp += cpu_matrix[j].hit[i]; } #if BROKEN if (temp > 0.0f) { GPU::LOR lor(lookup_table_lors[i].lor_a, lookup_table_lors[i].lor_b); gpu_output[p].hit[lor.index()] = temp; } #endif } } } cudaFree(gpu_prng_seed); cudaFree(gpu_MatrixElement); #endif } } // GPU } // Barrel } // PET2D
9f599159cfe28e6aa4c808a511f36e0cdd05cded.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file csv-reader.cu code to read csv data * * CSV Reader */ #include <hip/hip_runtime.h> #include <iostream> #include <vector> #include <string> #include <stdio.h> #include <numeric> #include <iomanip> #include <unordered_map> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <thrust/scan.h> #include <thrust/reduce.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include "type_conversion.cuh" #include "datetime_parser.cuh" #include "cudf.h" #include "utilities/error_utils.h" #include "rmm/rmm.h" #include "rmm/thrust_rmm_allocator.h" #include "io/comp/io_uncomp.h" #include "NVStrings.h" constexpr int32_t HASH_SEED = 33; constexpr size_t max_chunk_bytes = 64*1024*1024; // 64MB using std::vector; using std::string; using cu_reccnt_t = unsigned long long int; using cu_recstart_t = unsigned long long int; //-- define the structure for raw data handling - for internal use typedef struct raw_csv_ { char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array cu_recstart_t* recStart; // on-device: Starting position of the records. 
char delimiter; // host: the delimiter char terminator; // host: the line terminator char quotechar; // host: the quote character bool keepquotes; // host: indicates to keep the start and end quotechar bool doublequote; // host: indicates to interpret two consecutive quotechar as a single long num_bytes; // host: the number of bytes in the data long num_bits; // host: the number of 64-bit bitmaps (different than valid) gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read // int num_cols; // host: number of columns int num_active_cols; // host: number of columns that will be return to user. int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end) vector<string> col_names; // host: array of column names bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. long header_row; // Row id of the header bool dayfirst; char decimal; char thousands; gdf_size_type nrows; // number of rows of file to read. 
default is -1, and all rows are read in this case gdf_size_type skiprows; // number of rows at the start of the file to skip, default is 0 gdf_size_type skipfooter; // number of rows at the bottom of the file to skip, default is 0 rmm::device_vector<int32_t> d_trueValues; // device: array of values to recognize as true rmm::device_vector<int32_t> d_falseValues; // device: array of values to recognize as false } raw_csv_t; typedef struct column_data_ { unsigned long long countFloat; unsigned long long countDateAndTime; unsigned long long countString; unsigned long long countInt8; unsigned long long countInt16; unsigned long long countInt32; unsigned long long countInt64; gdf_size_type countNULL; } column_data_t; typedef struct parsing_opts_ { char delimiter; char terminator; char quotechar; bool keepquotes; char decimal; char thousands; int32_t* trueValues; int32_t* falseValues; int32_t trueValuesCount; int32_t falseValuesCount; } parsing_opts_t; using string_pair = std::pair<const char*,size_t>; // //---------------create and process --------------------------------------------- // gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv); // gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d); gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type); gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data); gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv); gdf_error allocateGdfDataSpace(gdf_column *); gdf_dtype convertStringToDtype(std::string &dtype); #define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; } // //---------------CUDA Kernel --------------------------------------------- // __device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x); gdf_error 
launch_countRecords(const char* h_data, size_t h_size, char terminator, char quote, gdf_size_type& rec_cnt); gdf_error launch_storeRecordStart(const char* h_data, size_t h_size, raw_csv_t * csvData); gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, unsigned long long *); gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, column_data_t* d_columnData); __global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records); __global__ void storeRecordStart(char *data, size_t chunk_offset, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records, cu_recstart_t* recStart); __global__ void convertCsvToGdf(char *csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart,gdf_dtype *dtype, void **gdf_data, gdf_valid_type **valid, string_pair **str_cols, bool dayfirst, unsigned long long *num_valid); __global__ void dataTypeDetection(char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, column_data_t* d_columnData); // //---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels --------------------------------------------- // __device__ int whichBitmap(int record) { return (record/8); } __device__ int whichBit(int bit) { return (bit % 8); } __inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val) { int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3)); int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8); atomicOr(base_address, int_val); } __device__ void setBit(gdf_valid_type* address, int bit) { gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128}; validAtomicOR(address, bitMask[bit]); } std::string stringType(gdf_dtype dt){ switch (dt){ case GDF_STRING: 
return std::string("str"); case GDF_DATE64: return std::string("date64"); case GDF_CATEGORY: return std::string("category"); case GDF_FLOAT64: return std::string("float64"); case GDF_INT8: return std::string("int8"); case GDF_INT16: return std::string("int16"); case GDF_INT32: return std::string("int32"); case GDF_INT64: return std::string("int64"); default: return "long"; } } /**---------------------------------------------------------------------------* * @brief Read in a CSV file, extract all fields and return * a GDF (array of gdf_columns) * * @param[in,out] args Structure containing both the the input arguments * and the returned data * * @return gdf_error *---------------------------------------------------------------------------**/ gdf_error read_csv(csv_read_arg *args) { gdf_error error = gdf_error::GDF_SUCCESS; //----------------------------------------------------------------------------- // create the CSV data structure - this will be filled in as the CSV data is processed. // Done first to validate data types raw_csv_t * raw_csv = new raw_csv_t; // error = parseArguments(args, raw_csv); raw_csv->num_actual_cols = args->num_cols; raw_csv->num_active_cols = args->num_cols; raw_csv->num_records = 0; if(args->delim_whitespace == true) { raw_csv->delimiter = ' '; } else { raw_csv->delimiter = args->delimiter; } if(args->windowslinetermination) { raw_csv->terminator = '\n'; } else { raw_csv->terminator = args->lineterminator; } raw_csv->quotechar = args->quotechar; if(raw_csv->quotechar != '\0') { raw_csv->keepquotes = !args->quoting; raw_csv->doublequote = args->doublequote; } else { raw_csv->keepquotes = true; raw_csv->doublequote = false; } if (args->names == nullptr) { raw_csv->header_row = args->header; } else{ raw_csv->header_row = -1; } raw_csv->dayfirst = args->dayfirst; raw_csv->decimal = args->decimal; raw_csv->thousands = args->thousands; raw_csv->skiprows = args->skiprows; raw_csv->skipfooter = args->skipfooter; raw_csv->nrows = args->nrows; if 
(raw_csv->header_row >= 0 && args->nrows >= 0) { ++raw_csv->nrows; } if (raw_csv->decimal == raw_csv->delimiter) { checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter"); } if (raw_csv->thousands == raw_csv->delimiter) { checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter"); } // Handle user-defined booleans values, whereby field data is substituted // with true/false values; CUDF booleans are int types of 0 or 1 // The true/false value strings are converted to integers which are used // by the data conversion kernel for comparison and value replacement if ((args->true_values != NULL) && (args->num_true_values > 0)) { thrust::host_vector<int32_t> h_values(args->num_true_values); for (int i = 0; i < args->num_true_values; ++i) { h_values[i] = convertStrtoInt<int32_t>(args->true_values[i], 0, strlen(args->true_values[i]) - 1); } raw_csv->d_trueValues = h_values; } if ((args->false_values != NULL) && (args->num_false_values > 0)) { thrust::host_vector<int32_t> h_values(args->num_false_values); for (int i = 0; i < args->num_false_values; ++i) { h_values[i] = convertStrtoInt<int32_t>(args->false_values[i], 0, strlen(args->false_values[i]) - 1); } raw_csv->d_falseValues = h_values; } //----------------------------------------------------------------------------- // memory map in the data void * map_data = NULL; size_t map_size = 0; int fd = 0; if (args->input_data_form == gdf_csv_input_form::FILE_PATH) { fd = open(args->filepath_or_buffer, O_RDONLY ); if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); } struct stat st{}; if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); } map_size = st.st_size; raw_csv->num_bytes = map_size; map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, 0); if (map_data == MAP_FAILED || map_size==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); } } else if (args->input_data_form == 
gdf_csv_input_form::HOST_BUFFER) { map_data = (void *)args->filepath_or_buffer; raw_csv->num_bytes = map_size = args->buffer_size; } else { checkError(GDF_C_ERROR, "invalid input type"); } string compression_type; error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type); checkError(error, "call to inferCompressionType"); const char* h_uncomp_data; size_t h_uncomp_size = 0; // Used when the input data is compressed, to ensure the allocated uncompressed data is freed vector<char> h_uncomp_data_owner; if (compression_type == "none") { // Do not use the owner vector here to avoid copying the whole file to the heap h_uncomp_data = (const char*)map_data; h_uncomp_size = map_size; } else { error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner); checkError(error, "call to getUncompressedHostData"); h_uncomp_data = h_uncomp_data_owner.data(); h_uncomp_size = h_uncomp_data_owner.size(); } assert(h_uncomp_data != nullptr); assert(h_uncomp_size != 0); error = launch_countRecords(h_uncomp_data, h_uncomp_size, raw_csv->terminator, raw_csv->quotechar, raw_csv->num_records); if (error != GDF_SUCCESS) { return error; } //----------------------------------------------------------------------------- //-- Allocate space to hold the record starting point RMM_TRY( RMM_ALLOC((void**)&(raw_csv->recStart), (sizeof(cu_recstart_t) * (raw_csv->num_records + 1)), 0) ); //----------------------------------------------------------------------------- //-- Scan data and set the starting positions error = launch_storeRecordStart(h_uncomp_data, h_uncomp_size, raw_csv); checkError(error, "call to record initial position store"); // Previous kernel stores the record pinput_file.typeositions as encountered by all threads // Sort the record positions as subsequent processing may require filtering // certain rows or other processing on specific records thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + 
raw_csv->num_records + 1); // Currently, ignoring lineterminations within quotes is handled by recording // the records of both, and then filtering out the records that is a quotechar // or a linetermination within a quotechar pair. The future major refactoring // of csv_reader and its kernels will probably use a different tactic. if (raw_csv->quotechar != '\0') { vector<cu_recstart_t> h_rec_starts(raw_csv->num_records + 1); const size_t rec_start_size = sizeof(cu_recstart_t) * (h_rec_starts.size()); CUDA_TRY( hipMemcpy(h_rec_starts.data(), raw_csv->recStart, rec_start_size, hipMemcpyDeviceToHost) ); auto recCount = raw_csv->num_records; bool quotation = false; for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) { if (h_uncomp_data[h_rec_starts[i] - 1] == raw_csv->quotechar) { quotation = !quotation; h_rec_starts[i] = raw_csv->num_bytes; recCount--; } else if (quotation) { h_rec_starts[i] = raw_csv->num_bytes; recCount--; } } CUDA_TRY( hipMemcpy(raw_csv->recStart, h_rec_starts.data(), rec_start_size, hipMemcpyHostToDevice) ); thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1); raw_csv->num_records = recCount; } error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, raw_csv); if (error != GDF_SUCCESS) { return error; } //----------------------------------------------------------------------------- //-- Populate the header // Check if the user gave us a list of column names if(args->names == nullptr) { int h_num_cols = 0; // Getting the first row of data from the file. We will parse the data to find lineterminator as // well as the column delimiter. 
cu_recstart_t second_rec_start; CUDA_TRY(hipMemcpy(&second_rec_start, raw_csv->recStart + 1, sizeof(cu_recstart_t), hipMemcpyDefault)); vector<char> first_row(second_rec_start); CUDA_TRY(hipMemcpy(first_row.data(), raw_csv->data, sizeof(char) * first_row.size(), hipMemcpyDefault)); // datect the number of rows and assign the column name raw_csv->col_names.clear(); if(raw_csv->header_row >= 0) { size_t prev = 0; size_t c = 0; // Storing the names of the columns into a vector of strings while(c < first_row.size()) { if (first_row[c] == args->delimiter || first_row[c] == args->lineterminator){ std::string colName(first_row.data() + prev, c - prev ); prev = c + 1; raw_csv->col_names.push_back(colName); h_num_cols++; } c++; } } else { size_t c = 0; while(c < first_row.size()) { if (first_row[c] == args->lineterminator) { h_num_cols++; break; } else if(first_row[c] == '\r' && (c+1L) < first_row.size() && first_row[c+1] == '\n'){ h_num_cols++; break; }else if (first_row[c] == args->delimiter) h_num_cols++; c++; } // assign column indexes as names if the header column is not present for (int i = 0; i<h_num_cols; i++) { std::string newColName = std::to_string(i); raw_csv->col_names.push_back(newColName); } } // Allocating a boolean array that will use to state if a column needs to read or filtered. raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols)); RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) ); for (int i = 0; i<h_num_cols; i++) raw_csv->h_parseCol[i]=true; int h_dup_cols_removed = 0; // Looking for duplicates for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){ bool found_dupe = false; for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){ if (*it==*it2){ found_dupe=true; break; } } if(found_dupe){ int count=1; for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){ if (*it==*it2){ if(args->mangle_dupe_cols){ // Replace all the duplicates of column X with X.1,X.2,... 
First appearance stays as X. std::string newColName = *it2; newColName += "." + std::to_string(count); count++; *it2 = newColName; } else{ // All duplicate fields will be ignored. int pos=std::distance(raw_csv->col_names.begin(), it2); raw_csv->h_parseCol[pos]=false; h_dup_cols_removed++; } } } } } raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed based on duplicatation fields CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), hipMemcpyHostToDevice)); } else { raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols)); RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) ); for (int i = 0; i<raw_csv->num_actual_cols; i++){ raw_csv->h_parseCol[i]=true; std::string col_name = args->names[i]; raw_csv->col_names.push_back(col_name); } CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), hipMemcpyHostToDevice)); } // User can give if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){ if(args->use_cols_int!=NULL){ for (int i = 0; i<raw_csv->num_actual_cols; i++) raw_csv->h_parseCol[i]=false; for(int i=0; i < args->use_cols_int_len; i++){ int pos = args->use_cols_int[i]; raw_csv->h_parseCol[pos]=true; } raw_csv->num_active_cols = args->use_cols_int_len; }else{ for (int i = 0; i<raw_csv->num_actual_cols; i++) raw_csv->h_parseCol[i]=false; int countFound=0; for(int i=0; i < args->use_cols_char_len; i++){ std::string colName(args->use_cols_char[i]); for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){ if(colName==*it){ countFound++; int pos=std::distance(raw_csv->col_names.begin(), it); raw_csv->h_parseCol[pos]=true; break; } } } raw_csv->num_active_cols = countFound; } CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), 
hipMemcpyHostToDevice)); } if (raw_csv->header_row>=0) { raw_csv->num_records-=1; } //----------------------------------------------------------------------------- //--- done with host data if (args->input_data_form == gdf_csv_input_form::FILE_PATH) { close(fd); munmap(map_data, map_size); } //----------------------------------------------------------------------------- //--- Auto detect types of the vectors if(args->dtype==NULL){ if (raw_csv->num_records == 0) { checkError(GDF_INVALID_API_CALL, "read_csv: no data available for data type inference"); } column_data_t *d_ColumnData,*h_ColumnData; h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols)); RMM_TRY( RMM_ALLOC((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) ); CUDA_TRY( hipMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ; launch_dataTypeDetection(raw_csv, d_ColumnData); CUDA_TRY( hipMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), hipMemcpyDeviceToHost)); vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end) raw_csv->dtypes.clear(); for(int col = 0; col < raw_csv->num_active_cols; col++){ unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+ h_ColumnData[col].countInt32+h_ColumnData[col].countInt64; if (h_ColumnData[col].countNULL == raw_csv->num_records){ d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory } else if(h_ColumnData[col].countString>0L){ d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings. 
} else if(h_ColumnData[col].countDateAndTime>0L){ d_detectedTypes.push_back(GDF_DATE64); } else if(h_ColumnData[col].countFloat > 0L || (h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) { // The second condition has been added to conform to PANDAS which states that a colum of // integers with a single NULL record need to be treated as floats. d_detectedTypes.push_back(GDF_FLOAT64); } else { d_detectedTypes.push_back(GDF_INT64); } } raw_csv->dtypes=d_detectedTypes; free(h_ColumnData); RMM_TRY( RMM_FREE( d_ColumnData, 0 ) ); } else{ for ( int x = 0; x < raw_csv->num_actual_cols; x++) { std::string temp_type = args->dtype[x]; gdf_dtype col_dtype = convertStringToDtype( temp_type ); if (col_dtype == GDF_invalid) return GDF_UNSUPPORTED_DTYPE; raw_csv->dtypes.push_back(col_dtype); } } //----------------------------------------------------------------------------- //--- allocate space for the results gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols); void **d_data,**h_data; gdf_valid_type **d_valid,**h_valid; unsigned long long *d_valid_count; gdf_dtype *d_dtypes,*h_dtypes; h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols)); h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols)); h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols)); RMM_TRY( RMM_ALLOC((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) ); RMM_TRY( RMM_ALLOC((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) ); RMM_TRY( RMM_ALLOC((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) ); RMM_TRY( RMM_ALLOC((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) ); CUDA_TRY( hipMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) ); int stringColCount=0; for (int col = 0; col < raw_csv->num_active_cols; col++) { 
if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING) stringColCount++; } string_pair **h_str_cols = NULL, **d_str_cols = NULL; if (stringColCount > 0 ) { h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount)); RMM_TRY( RMM_ALLOC((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) ); for (int col = 0; col < stringColCount; col++) { RMM_TRY( RMM_ALLOC((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) ); } CUDA_TRY(hipMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, hipMemcpyHostToDevice)); } for (int acol = 0,col=-1; acol < raw_csv->num_actual_cols; acol++) { if(raw_csv->h_parseCol[acol]==false) continue; col++; gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1); gdf->size = raw_csv->num_records; gdf->dtype = raw_csv->dtypes[col]; gdf->null_count = 0; // will be filled in later //--- column name std::string str = raw_csv->col_names[acol]; int len = str.length() + 1; gdf->col_name = (char *)malloc(sizeof(char) * len); memcpy(gdf->col_name, str.c_str(), len); gdf->col_name[len -1] = '\0'; allocateGdfDataSpace(gdf); cols[col] = gdf; h_dtypes[col] = gdf->dtype; h_data[col] = gdf->data; h_valid[col] = gdf->valid; } CUDA_TRY( hipMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), hipMemcpyHostToDevice)); CUDA_TRY( hipMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice)); CUDA_TRY( hipMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice)); free(h_dtypes); free(h_valid); free(h_data); if (raw_csv->num_records != 0) { error = launch_dataConvertColumns(raw_csv, d_data, d_valid, d_dtypes, d_str_cols, d_valid_count); if (error != GDF_SUCCESS) { return error; } // Sync with the default stream, just in case create_from_index() is asynchronous hipStreamSynchronize(0); stringColCount=0; for (int col = 0; col < raw_csv->num_active_cols; col++) { gdf_column *gdf = cols[col]; if (gdf->dtype 
!= gdf_dtype::GDF_STRING) continue; NVStrings* const stringCol = NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records)); if ((raw_csv->quotechar != '\0') && (raw_csv->doublequote==true)) { // In PANDAS, default of enabling doublequote for two consecutive // quotechar in quote fields results in reduction to single const string quotechar(1, raw_csv->quotechar); const string doublequotechar(2, raw_csv->quotechar); gdf->data = stringCol->replace(doublequotechar.c_str(), quotechar.c_str()); NVStrings::destroy(stringCol); } else { gdf->data = stringCol; } RMM_TRY( RMM_FREE( h_str_cols [stringColCount], 0 ) ); stringColCount++; } vector<unsigned long long> h_valid_count(raw_csv->num_active_cols); CUDA_TRY( hipMemcpy(h_valid_count.data(), d_valid_count, sizeof(unsigned long long) * h_valid_count.size(), hipMemcpyDeviceToHost)); //--- set the null count for (size_t col = 0; col < h_valid_count.size(); col++) { cols[col]->null_count = raw_csv->num_records - h_valid_count[col]; } } // free up space that is no longer needed if (h_str_cols != NULL) free ( h_str_cols); free(raw_csv->h_parseCol); if (d_str_cols != NULL) RMM_TRY( RMM_FREE( d_str_cols, 0 ) ); RMM_TRY( RMM_FREE( d_valid, 0 ) ); RMM_TRY( RMM_FREE( d_valid_count, 0 ) ); RMM_TRY( RMM_FREE( d_dtypes, 0 ) ); RMM_TRY( RMM_FREE( d_data, 0 ) ); RMM_TRY( RMM_FREE( raw_csv->recStart, 0 ) ); RMM_TRY( RMM_FREE( raw_csv->d_parseCol, 0 ) ); RMM_TRY( RMM_FREE ( raw_csv->data, 0) ); args->data = cols; args->num_cols_out = raw_csv->num_active_cols; args->num_rows_out = raw_csv->num_records; delete raw_csv; return error; } /* * What is passed in is the data type as a string, need to convert that into gdf_dtype enum */ gdf_dtype convertStringToDtype(std::string &dtype) { if (dtype.compare( "str") == 0) return GDF_STRING; if (dtype.compare( "date") == 0) return GDF_DATE64; if (dtype.compare( "date32") == 0) return GDF_DATE32; if (dtype.compare( "date64") == 0) return GDF_DATE64; if (dtype.compare( 
"timestamp") == 0) return GDF_TIMESTAMP; if (dtype.compare( "category") == 0) return GDF_CATEGORY; if (dtype.compare( "float") == 0) return GDF_FLOAT32; if (dtype.compare( "float32") == 0) return GDF_FLOAT32; if (dtype.compare( "float64") == 0) return GDF_FLOAT64; if (dtype.compare( "double") == 0) return GDF_FLOAT64; if (dtype.compare( "short") == 0) return GDF_INT16; if (dtype.compare( "int") == 0) return GDF_INT32; if (dtype.compare( "int32") == 0) return GDF_INT32; if (dtype.compare( "int64") == 0) return GDF_INT64; if (dtype.compare( "long") == 0) return GDF_INT64; return GDF_invalid; } /**---------------------------------------------------------------------------* * @brief Infer the compression type from the compression parameter and * the input file name * * Returns "none" if the input is not compressed. * * @param[in] compression_arg Input string that is potentially describing * the compression type. Can also be nullptr, "none", or "infer" * @param[in] filepath path + name of the input file * @param[out] compression_type String describing the inferred compression type * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type) { if (compression_arg && 0 == strcasecmp(compression_arg, "none")) { compression_arg = nullptr; } if (compression_arg && 0 == strcasecmp(compression_arg, "infer")) { const char *file_ext = strrchr(filepath, '.'); compression_arg = nullptr; if (file_ext) { if (!strcasecmp(file_ext, ".gz")) compression_arg = "gzip"; else if (!strcasecmp(file_ext, ".zip")) compression_arg = "zip"; else if (!strcasecmp(file_ext, ".bz2")) compression_arg = "bz2"; else if (!strcasecmp(file_ext, ".xz")) compression_arg = "xz"; else { // TODO: return error here } } } compression_type = compression_arg == nullptr? 
"none":string(compression_arg); return GDF_SUCCESS; } /**---------------------------------------------------------------------------* * @brief Uncompresses the input data and stores the allocated result into * a vector. * * @param[in] h_data Pointer to the csv data in host memory * @param[in] num_bytes Size of the input data, in bytes * @param[in] compression String describing the compression type * @param[out] h_uncomp_data Vector containing the output uncompressed data * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data) { int comp_type = IO_UNCOMP_STREAM_TYPE_INFER; if (compression == "gzip") comp_type = IO_UNCOMP_STREAM_TYPE_GZIP; else if (compression == "zip") comp_type = IO_UNCOMP_STREAM_TYPE_ZIP; else if (compression == "bz2") comp_type = IO_UNCOMP_STREAM_TYPE_BZIP2; else if (compression == "xz") comp_type = IO_UNCOMP_STREAM_TYPE_XZ; return io_uncompress_single_h2d(h_data, num_bytes, comp_type, h_uncomp_data); } /**---------------------------------------------------------------------------* * @brief Uploads the relevant segment of the input csv data onto the GPU. * * Only rows that need to be read are copied to the GPU, based on parameters * like nrows, skipheader, skipfooter. * Also updates the array of record starts to match the device data offset. 
* * @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory * @param[in] h_uncomp_size Size of the input data, in bytes * @param[in,out] csvData Structure containing the csv parsing parameters * and intermediate results * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv) { vector<cu_recstart_t> h_rec_starts(raw_csv->num_records + 1); CUDA_TRY( hipMemcpy(h_rec_starts.data(), raw_csv->recStart, sizeof(cu_recstart_t) * h_rec_starts.size(), hipMemcpyDefault)); // Exclude the rows user chose to skip at the start of the file const gdf_size_type first_row = raw_csv->skiprows + max(raw_csv->header_row, 0l); if (raw_csv->num_records > first_row) { raw_csv->num_records = raw_csv->num_records - (long)first_row; } else { checkError(GDF_FILE_ERROR, "Number of records is too small for the specified skiprows and header parameters"); } // Restrict the rows to nrows if nrows is smaller than the remaining number of rows if (raw_csv->nrows >= 0 && (gdf_size_type)raw_csv->nrows < raw_csv->num_records) { raw_csv->num_records = (gdf_size_type)raw_csv->nrows; } // Exclude the rows user chose to skip at the end of the file if (raw_csv->skipfooter != 0) { raw_csv->num_records = gdf_size_type(max(raw_csv->num_records - raw_csv->skipfooter, gdf_size_type{0})); } // Have to at least read the header row if (raw_csv->header_row >= 0 && raw_csv->num_records == 0) raw_csv->num_records = 1; // If specified, header row will always be the first row in the GPU data raw_csv->header_row = min(raw_csv->header_row, 0l); const auto start_offset = h_rec_starts[first_row]; const auto end_offset = h_rec_starts[first_row + raw_csv->num_records] - 1; raw_csv->num_bytes = end_offset - start_offset + 1; assert(raw_csv->num_bytes <= h_uncomp_size); raw_csv->num_bits = (raw_csv->num_bytes + 63) / 
64; // Update the record starts to match the device data (skip missing records, fix offset) for (gdf_size_type i = first_row; i <= first_row + raw_csv->num_records; ++i) h_rec_starts[i] -= start_offset; RMM_TRY(RMM_REALLOC(&raw_csv->recStart, sizeof(cu_recstart_t) * (raw_csv->num_records + 1), 0)); CUDA_TRY( hipMemcpy(raw_csv->recStart, h_rec_starts.data() + first_row, sizeof(cu_recstart_t) * (raw_csv->num_records + 1), hipMemcpyDefault)); // Allocate and copy to the GPU RMM_TRY(RMM_ALLOC ((void**)&raw_csv->data, (sizeof(char) * raw_csv->num_bytes), 0)); CUDA_TRY(hipMemcpy(raw_csv->data, h_uncomp_data + start_offset, raw_csv->num_bytes, hipMemcpyHostToDevice)); return GDF_SUCCESS; } /* * For each of the gdf_cvolumns, create the on-device space. the on-host fields should already be filled in */ gdf_error allocateGdfDataSpace(gdf_column *gdf) { long N = gdf->size; long num_bitmaps = (N + 31) / 8; // 8 bytes per bitmap //--- allocate space for the valid bitmaps RMM_TRY( RMM_ALLOC((void**)&gdf->valid, (sizeof(gdf_valid_type) * num_bitmaps), 0) ); CUDA_TRY(hipMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) ); int elementSize=0; //--- Allocate space for the data switch(gdf->dtype) { case gdf_dtype::GDF_INT8: elementSize = sizeof(int8_t); break; case gdf_dtype::GDF_INT16: elementSize = sizeof(int16_t); break; case gdf_dtype::GDF_INT32: elementSize = sizeof(int32_t); break; case gdf_dtype::GDF_INT64: elementSize = sizeof(int64_t); break; case gdf_dtype::GDF_FLOAT32: elementSize = sizeof(float); break; case gdf_dtype::GDF_FLOAT64: elementSize = sizeof(double); break; case gdf_dtype::GDF_DATE32: elementSize = sizeof(gdf_date32); break; case gdf_dtype::GDF_DATE64: elementSize = sizeof(gdf_date64); break; case gdf_dtype::GDF_TIMESTAMP: elementSize = sizeof(int64_t); break; case gdf_dtype::GDF_CATEGORY: elementSize = sizeof(gdf_category); break; case gdf_dtype::GDF_STRING: return gdf_error::GDF_SUCCESS; // Memory for gdf->data allocated by string class eventually 
default: return GDF_UNSUPPORTED_DTYPE; } RMM_TRY( RMM_ALLOC((void**)&gdf->data, elementSize * N, 0) ); return gdf_error::GDF_SUCCESS; } //---------------------------------------------------------------------------------------------------------------- // CUDA Kernels //---------------------------------------------------------------------------------------------------------------- /**---------------------------------------------------------------------------* * @brief Counts the number of rows in the input csv file. * * Does not load the entire file into the GPU memory at any time, so it can * be used to parse large files. * Does not take quotes into consideration, so it will return extra rows * if the line terminating characters are present within quotes. * Because of this the result should be postprocessed to remove * the fake line endings. * * @param[in] h_data Pointer to the csv data in host memory * @param[in] h_size Size of the input data, in bytes * @param[in] terminator Line terminator character * @param[in] quote Quote character * @param[out] rec_cnt The resulting number of rows (records) * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error launch_countRecords(const char* h_data, size_t h_size, char terminator, char quote, gdf_size_type& rec_cnt) { const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; vector<cu_reccnt_t> h_cnts(chunk_count); cu_reccnt_t* d_cnts = nullptr; RMM_TRY(RMM_ALLOC (&d_cnts, sizeof(cu_reccnt_t)* chunk_count, 0)); CUDA_TRY(hipMemset(d_cnts, 0, sizeof(cu_reccnt_t)* chunk_count)); char* d_chunk = nullptr; // Allocate extra byte in case \r\n is at the chunk border RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes + 1, 0)); int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, countRecords)); for (size_t 
ci = 0; ci < chunk_count; ++ci) { const auto h_chunk = h_data + ci * max_chunk_bytes; const auto chunk_bytes = ::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); const auto chunk_bits = (chunk_bytes + 63) / 64; // Copy chunk to device. Copy extra byte if not last chunk CUDA_TRY(hipMemcpy(d_chunk, h_chunk, ci < (chunk_count - 1)?chunk_bytes:chunk_bytes + 1, hipMemcpyDefault)); const int gridSize = (chunk_bits + blockSize - 1) / blockSize; hipLaunchKernelGGL(( countRecords) , dim3(gridSize), dim3(blockSize) , 0, 0, d_chunk, terminator, quote, chunk_bytes, chunk_bits, &d_cnts[ci] ); } CUDA_TRY(hipMemcpy(h_cnts.data(), d_cnts, chunk_count*sizeof(cu_reccnt_t), hipMemcpyDefault)); RMM_TRY( RMM_FREE(d_chunk, 0) ); RMM_TRY( RMM_FREE(d_cnts, 0) ); CUDA_TRY(hipGetLastError()); rec_cnt = std::accumulate(h_cnts.begin(), h_cnts.end(), gdf_size_type(0)); return GDF_SUCCESS; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that counts the number of rows in the given * file segment, based on the location of line terminators. * * @param[in] data Device memory pointer to the csv data, * potentially a chunk of the whole file * @param[in] terminator Line terminator character * @param[in] quotechar Quote character * @param[in] num_bytes Number of bytes in the input data * @param[in] num_bits Number of 'bits' in the input data. 
Each 'bit' is * processed by a separate CUDA thread * @param[in,out] num_records Device memory pointer to the number of found rows * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ __global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records) { // thread IDs range per block, so also need the block id long tid = threadIdx.x + (blockDim.x * blockIdx.x); if (tid >= num_bits) return; // data ID is a multiple of 64 long did = tid * 64L; char *raw = (data + did); long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); // process the data cu_reccnt_t tokenCount = 0; for (long x = 0; x < byteToProcess; x++) { // Scan and log records. If quotations are enabled, then also log quotes // for a postprocess ignore, as the chunk here has limited visibility. if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { tokenCount++; } else if (raw[x] == '\r' && raw[x +1] == '\n') { x++; tokenCount++; } } atomicAdd(num_records, tokenCount); } /**---------------------------------------------------------------------------* * @brief Finds the start of each row (record) in the given file, based on * the location of line terminators. The offset of each found row is stored * in the recStart data member of the csvData parameter. * * Does not load the entire file into the GPU memory at any time, so it can * be used to parse large files. * Does not take quotes into consideration, so it will return extra rows * if the line terminating characters are present within quotes. * Because of this the result should be postprocessed to remove * the fake line endings. 
* * @param[in] h_data Pointer to the csv data in host memory * @param[in] h_size Size of the input data, in bytes * @param[in,out] csvData Structure containing the csv parsing parameters * and intermediate results * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error launch_storeRecordStart(const char* h_data, size_t h_size, raw_csv_t * csvData) { char* d_chunk = nullptr; // Allocate extra byte in case \r\n is at the chunk border RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes + 1, 0)); cu_reccnt_t* d_num_records; RMM_TRY(RMM_ALLOC((void**)&d_num_records, sizeof(cu_reccnt_t), 0) ); // set the first record starting a zero instead of setting it in the kernel const auto one = 1ull; CUDA_TRY(hipMemcpy(d_num_records, &one, sizeof(cu_reccnt_t), hipMemcpyDefault)); CUDA_TRY(hipMemset(csvData->recStart, 0ull, (sizeof(cu_recstart_t)))); int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, storeRecordStart) ); const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; for (size_t ci = 0; ci < chunk_count; ++ci) { const auto chunk_offset = ci * max_chunk_bytes; const auto h_chunk = h_data + chunk_offset; const auto chunk_bytes = ::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); const auto chunk_bits = (chunk_bytes + 63) / 64; // Copy chunk to device. 
Copy extra byte if not last chunk CUDA_TRY(hipMemcpy(d_chunk, h_chunk, ci < (chunk_count - 1)?chunk_bytes:chunk_bytes + 1, hipMemcpyDefault)); const int gridSize = (chunk_bits + blockSize - 1) / blockSize; hipLaunchKernelGGL(( storeRecordStart) , dim3(gridSize), dim3(blockSize) , 0, 0, d_chunk, chunk_offset, csvData->terminator, csvData->quotechar, chunk_bytes, chunk_bits, d_num_records, csvData->recStart ); } RMM_TRY( RMM_FREE( d_num_records, 0 ) ); RMM_TRY( RMM_FREE( d_chunk, 0 ) ); CUDA_TRY( hipGetLastError() ); return GDF_SUCCESS; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that finds the start of each row (record) in the given * file segment, based on the location of line terminators. * * The offset of each found row is stored in a device memory array. * The kernel operate on a segment (chunk) of the csv file. * * @param[in] data Device memory pointer to the csv data, * potentially a chunk of the whole file * @param[in] chunk_offset Offset of the data pointer from the start of the file * @param[in] terminator Line terminator character * @param[in] quotechar Quote character * @param[in] num_bytes Number of bytes in the input data * @param[in] num_bits Number of 'bits' in the input data. 
Each 'bit' is * processed by a separate CUDA thread * @param[in,out] num_records Device memory pointer to the number of found rows * @param[out] recStart device memory array containing the offset of each record * * @return void *---------------------------------------------------------------------------**/ __global__ void storeRecordStart(char *data, size_t chunk_offset, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records, cu_recstart_t* recStart) { // thread IDs range per block, so also need the block id long tid = threadIdx.x + (blockDim.x * blockIdx.x); if ( tid >= num_bits) return; // data ID - multiple of 64 long did = tid * 64L; char *raw = (data + did); long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); // process the data for (long x = 0; x < byteToProcess; x++) { // Scan and log records. If quotations are enabled, then also log quotes // for a postprocess ignore, as the chunk here has limited visibility. if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { const auto pos = atomicAdd(num_records, 1ull); recStart[pos] = did + chunk_offset + x + 1; } else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') { x++; const auto pos = atomicAdd(num_records, 1ull); recStart[pos] = did + chunk_offset + x + 1; } } } //---------------------------------------------------------------------------------------------------------------- gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, unsigned long long *num_valid) { int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, convertCsvToGdf) ); // Calculate actual block count to use based on records count int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; parsing_opts_t opts; opts.delimiter = raw_csv->delimiter; 
opts.terminator = raw_csv->terminator; opts.quotechar = raw_csv->quotechar; opts.keepquotes = raw_csv->keepquotes; opts.decimal = raw_csv->decimal; opts.thousands = raw_csv->thousands; opts.trueValues = thrust::raw_pointer_cast(raw_csv->d_trueValues.data()); opts.trueValuesCount = raw_csv->d_trueValues.size(); opts.falseValues = thrust::raw_pointer_cast(raw_csv->d_falseValues.data()); opts.falseValuesCount = raw_csv->d_falseValues.size(); auto first_data_rec_start = raw_csv->recStart; if (raw_csv->header_row >= 0) { // skip the header row if present ++first_data_rec_start; } hipLaunchKernelGGL(( convertCsvToGdf) , dim3(gridSize), dim3(blockSize) , 0, 0, raw_csv->data, opts, raw_csv->num_records, raw_csv->num_actual_cols, raw_csv->d_parseCol, first_data_rec_start, d_dtypes, gdf, valid, str_cols, raw_csv->dayfirst, num_valid ); CUDA_TRY( hipGetLastError() ); return GDF_SUCCESS; } /* * Data is processed in one row\record at a time - so the number of total threads (tid) is equal to the number of rows. 
* */ __global__ void convertCsvToGdf( char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, gdf_dtype *dtype, void **gdf_data, gdf_valid_type **valid, string_pair **str_cols, bool dayfirst, unsigned long long *num_valid ) { // thread IDs range per block, so also need the block id long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array // we can have more threads than data, make sure we are not past the end of the data if ( rec_id >= num_records) return; long start = recStart[rec_id]; long stop = recStart[rec_id + 1]; long pos = start; int col = 0; int actual_col = 0; int stringCol = 0; bool quotation = false; while(col<num_columns){ if(start>stop) break; while(true){ // Use simple logic to ignore control chars between any quote seq // Handles nominal cases including doublequotes within quotes, but // may not output exact failures as PANDAS for malformed fields if(raw_csv[pos] == opts.quotechar){ quotation = !quotation; } else if(quotation==false){ if(raw_csv[pos] == opts.delimiter){ break; } else if(raw_csv[pos] == opts.terminator){ break; } else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){ stop--; break; } } if(pos>=stop) break; pos++; } if(parseCol[col]==true){ long tempPos=pos-1; // Modify start & end to ignore whitespace and quotechars if(dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){ adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar); } if(start<=(tempPos)) { // Empty strings are not legal values switch(dtype[actual_col]) { case gdf_dtype::GDF_INT8: { int8_t *gdf_out = (int8_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else 
if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT16: { int16_t *gdf_out = (int16_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT32: { int32_t *gdf_out = (int32_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT64: { int64_t *gdf_out = (int64_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_FLOAT32: { float *gdf_out = (float *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos, opts.decimal, opts.thousands); } break; case gdf_dtype::GDF_FLOAT64: { double *gdf_out = (double *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos, opts.decimal, opts.thousands); } break; case gdf_dtype::GDF_DATE32: { gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col]; gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst); } break; case gdf_dtype::GDF_DATE64: { gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col]; gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst); } 
break; case gdf_dtype::GDF_TIMESTAMP: { int64_t *gdf_out = (int64_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos, opts.thousands); } break; case gdf_dtype::GDF_CATEGORY: { gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED); } break; case gdf_dtype::GDF_STRING: { long end = pos; if(opts.keepquotes==false){ if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){ start++; end--; } } str_cols[stringCol][rec_id].first = raw_csv+start; str_cols[stringCol][rec_id].second = size_t(end-start); stringCol++; } break; default: break; } // set the valid bitmap - all bits were set to 0 to start int bitmapIdx = whichBitmap(rec_id); // which bitmap int bitIdx = whichBit(rec_id); // which bit - over an 8-bit index setBit(valid[actual_col]+bitmapIdx, bitIdx); // This is done with atomics atomicAdd((unsigned long long int*)&num_valid[actual_col],(unsigned long long int)1); } else if(dtype[actual_col]==gdf_dtype::GDF_STRING){ str_cols[stringCol][rec_id].first = NULL; str_cols[stringCol][rec_id].second = 0; stringCol++; } actual_col++; } pos++; start=pos; col++; } } //---------------------------------------------------------------------------------------------------------------- gdf_error launch_dataTypeDetection( raw_csv_t * raw_csv, column_data_t* d_columnData) { int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dataTypeDetection) ); // Calculate actual block count to use based on records count int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; parsing_opts_t opts; opts.delimiter = raw_csv->delimiter; opts.terminator = raw_csv->terminator; opts.quotechar = raw_csv->quotechar; opts.keepquotes = raw_csv->keepquotes; opts.trueValues = thrust::raw_pointer_cast(raw_csv->d_trueValues.data()); opts.trueValuesCount = 
raw_csv->d_trueValues.size(); opts.falseValues = thrust::raw_pointer_cast(raw_csv->d_falseValues.data()); opts.falseValuesCount = raw_csv->d_falseValues.size(); auto first_data_rec_start = raw_csv->recStart; if (raw_csv->header_row >= 0) { // skip the header row if present ++first_data_rec_start; } hipLaunchKernelGGL(( dataTypeDetection) , dim3(gridSize), dim3(blockSize) , 0, 0, raw_csv->data, opts, raw_csv->num_records, raw_csv->num_actual_cols, raw_csv->d_parseCol, first_data_rec_start, d_columnData ); CUDA_TRY( hipGetLastError() ); return GDF_SUCCESS; } /* */ __global__ void dataTypeDetection( char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, column_data_t* d_columnData ) { // thread IDs range per block, so also need the block id long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array // we can have more threads than data, make sure we are not past the end of the data if ( rec_id >= num_records) return; long start = recStart[rec_id]; long stop = recStart[rec_id + 1]; long pos = start; int col = 0; int actual_col = 0; bool quotation = false; // Going through all the columns of a given record while(col<num_columns){ if(start>stop) break; // Finding the breaking point for each column while(true){ // Use simple logic to ignore control chars between any quote seq // Handles nominal cases including doublequotes within quotes, but // may not output exact failures as PANDAS for malformed fields if(raw_csv[pos] == opts.quotechar){ quotation = !quotation; } else if(quotation==false){ if(raw_csv[pos] == opts.delimiter){ break; } else if(raw_csv[pos] == opts.terminator){ break; } else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){ stop--; break; } } if(pos>=stop) break; pos++; } // Checking if this is a column that the user wants --- user can filter columns if(parseCol[col]==true){ long 
tempPos=pos-1; // Checking if the record is NULL if(start>(tempPos)){ atomicAdd(& d_columnData[actual_col].countNULL, 1L); pos++; start=pos; col++; actual_col++; continue; } long countNumber=0; long countDecimal=0; long countSlash=0; long countDash=0; long countColon=0; long countString=0; // Modify start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos); long strLen=tempPos-start+1; for(long startPos=start; startPos<=tempPos; startPos++){ if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){ countNumber++; continue; } // Looking for unique characters that will help identify column types. switch (raw_csv[startPos]){ case '.': countDecimal++;break; case '-': countDash++; break; case '/': countSlash++;break; case ':': countColon++;break; default: countString++; break; } } if(strLen==0){ // Removed spaces ' ' in the pre-processing and thus we can have an empty string. atomicAdd(& d_columnData[actual_col].countNULL, 1L); } // Integers have to have the length of the string or can be off by one if they start with a minus sign else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){ // Checking to see if we the integer value requires 8,16,32,64 bits. // This will allow us to allocate the exact amount of memory. 
int64_t value = convertStrtoInt<int64_t>(raw_csv, start, tempPos, opts.thousands); if (isBooleanValue<int32_t>(value, opts.trueValues, opts.trueValuesCount) || isBooleanValue<int32_t>(value, opts.falseValues, opts.falseValuesCount)){ atomicAdd(& d_columnData[actual_col].countInt8, 1L); } else if(value >= (1L<<31)){ atomicAdd(& d_columnData[actual_col].countInt64, 1L); } else if(value >= (1L<<15)){ atomicAdd(& d_columnData[actual_col].countInt32, 1L); } else if(value >= (1L<<7)){ atomicAdd(& d_columnData[actual_col].countInt16, 1L); } else{ atomicAdd(& d_columnData[actual_col].countInt8, 1L); } } // Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign. else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){ atomicAdd(& d_columnData[actual_col].countFloat, 1L); } // The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not // a data-time field. Also, if a string has multiple decimals, then is not a legit number. else if(countString > 3 || countDecimal > 1){ atomicAdd(& d_columnData[actual_col].countString, 1L); } else { // A date field can have either one or two '-' or '\'. A legal combination will only have one of them. // To simplify the process of auto column detection, we are not covering all the date-time formation permutations. if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){ if((countColon<=2)){ atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L); } else{ atomicAdd(& d_columnData[actual_col].countString, 1L); } } // Default field is string type. 
else{ atomicAdd(& d_columnData[actual_col].countString, 1L); } } actual_col++; } pos++; start=pos; col++; } } //---------------------------------------------------------------------------------------------------------------- /* * Return which bit is set * x is the occurrence: 1 = first, 2 = seconds, ... */ __device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) { int idx = tid; if ( x == 0 ) return -1; int withinBitCount = 0; int offset = 0; int found = 0; uint64_t bitmap = r_bits[idx]; while (found != x) { if(bitmap == 0) { idx++; if (idx >= num_bits) return -1; bitmap = r_bits[idx]; offset += 64; withinBitCount = 0; } if ( bitmap & 1 ) { found++; //found a set bit } bitmap >>= 1; ++withinBitCount; } offset += withinBitCount -1; return offset; }
9f599159cfe28e6aa4c808a511f36e0cdd05cded.cu
/*
 * Copyright (c) 2018, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file csv-reader.cu code to read csv data
 *
 * CSV Reader
 */

#include <cuda_runtime.h>

#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <numeric>
#include <iomanip>
#include <unordered_map>

#include <stdio.h>
#include <stdlib.h>

#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>

#include <thrust/host_vector.h>

#include "type_conversion.cuh"
#include "datetime_parser.cuh"

#include "cudf.h"
#include "utilities/error_utils.h"

#include "rmm/rmm.h"
#include "rmm/thrust_rmm_allocator.h"
#include "io/comp/io_uncomp.h"

#include "NVStrings.h"

constexpr int32_t HASH_SEED = 33;

// Input is uploaded and scanned in chunks of at most this many bytes, so very
// large files never have to fit in GPU memory all at once.
constexpr size_t max_chunk_bytes = 64*1024*1024; // 64MB

using std::vector;
using std::string;

using cu_reccnt_t = unsigned long long int;
using cu_recstart_t = unsigned long long int;

//-- define the structure for raw data handling - for internal use
typedef struct raw_csv_ {
    char * data;             // on-device: the raw unprocessed CSV data - loaded as a large char * array
    cu_recstart_t* recStart; // on-device: Starting position of the records.

    char delimiter;   // host: the delimiter
    char terminator;  // host: the line terminator
    char quotechar;   // host: the quote character
    bool keepquotes;  // host: indicates to keep the start and end quotechar
    bool doublequote; // host: indicates to interpret two consecutive quotechar as a single

    long num_bytes;   // host: the number of bytes in the data
    long num_bits;    // host: the number of 64-bit bitmaps (different than valid)
    gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read
    // int num_cols;  // host: number of columns
    int num_active_cols; // host: number of columns that will be return to user.
    int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header
    vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
    vector<string> col_names; // host: array of column names
    bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
    bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
    long header_row;  // Row id of the header
    bool dayfirst;
    char decimal;
    char thousands;

    gdf_size_type nrows;      // number of rows of file to read. default is -1, and all rows are read in this case
    gdf_size_type skiprows;   // number of rows at the start of the file to skip, default is 0
    gdf_size_type skipfooter; // number of rows at the bottom of the file to skip, default is 0

    rmm::device_vector<int32_t> d_trueValues;  // device: array of values to recognize as true
    rmm::device_vector<int32_t> d_falseValues; // device: array of values to recognize as false
} raw_csv_t;

// Per-column tallies filled in by the dataTypeDetection kernel and read back
// on the host to pick each column's gdf_dtype.
typedef struct column_data_ {
    unsigned long long countFloat;
    unsigned long long countDateAndTime;
    unsigned long long countString;
    unsigned long long countInt8;
    unsigned long long countInt16;
    unsigned long long countInt32;
    unsigned long long countInt64;
    gdf_size_type countNULL;
} column_data_t;

// POD snapshot of the parsing options, passed by value to the conversion /
// detection kernels (the pointers reference device arrays).
typedef struct parsing_opts_ {
    char delimiter;
    char terminator;
    char quotechar;
    bool keepquotes;
    char decimal;
    char thousands;
    int32_t* trueValues;
    int32_t* falseValues;
    int32_t trueValuesCount;
    int32_t falseValuesCount;
} parsing_opts_t;

// (pointer into the device CSV buffer, field length) — consumed by NVStrings.
using string_pair = std::pair<const char*,size_t>;

//
//---------------create and process ---------------------------------------------
//
// gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type);
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data);
gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv);
gdf_error allocateGdfDataSpace(gdf_column *);
gdf_dtype convertStringToDtype(std::string &dtype);

// Logs and RETURNS from the enclosing function on error — note the hidden
// early return when reading code that uses this macro.
#define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; }

//
//---------------CUDA Kernel ---------------------------------------------
//

__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x);

// (return type of launch_countRecords; the declaration continues on the next
// chunk line)
gdf_error
// Host-side wrappers that size/launch the kernels declared below.
launch_countRecords(const char* h_data, size_t h_size, char terminator, char quote, gdf_size_type& rec_cnt);
gdf_error launch_storeRecordStart(const char* h_data, size_t h_size, raw_csv_t * csvData);
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, unsigned long long *);
gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, column_data_t* d_columnData);

__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records);
__global__ void storeRecordStart(char *data, size_t chunk_offset, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records, cu_recstart_t* recStart);
__global__ void convertCsvToGdf(char *csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, gdf_dtype *dtype, void **gdf_data, gdf_valid_type **valid, string_pair **str_cols, bool dayfirst, unsigned long long *num_valid);
__global__ void dataTypeDetection(char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, column_data_t* d_columnData);

//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//

// Index of the byte within the valid bitmap that holds this record's bit.
__device__ int whichBitmap(int record) { return (record/8); }
// Bit position of the record within its bitmap byte.
__device__ int whichBit(int bit) { return (bit % 8); }

// OR `val` into the bitmap byte at `address` via a word-wide atomic: CUDA has
// no byte-sized atomicOr, so align down to the containing 32-bit word and
// shift the byte value into position. NOTE(review): the atomic writes the
// whole 4-byte word, so the valid buffer must be padded a few bytes past its
// last used byte — see the (N + 31) / 8 allocation in allocateGdfDataSpace.
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
    int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
    int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);

    atomicOr(base_address, int_val);
}

// Set bit `bit` (0-7) in the valid-bitmap byte at `address`.
__device__ void setBit(gdf_valid_type* address, int bit) {
    gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
    validAtomicOR(address, bitMask[bit]);
}

// Map a gdf_dtype to its string name (the switch body continues on the next
// chunk line).
std::string stringType(gdf_dtype dt){

    switch (dt){
        case GDF_STRING:
            return std::string("str");
        case GDF_DATE64:
            return std::string("date64");
        case GDF_CATEGORY:
            return std::string("category");
        case GDF_FLOAT64:
            return std::string("float64");
        case GDF_INT8:
            return std::string("int8");
        case GDF_INT16:
            return std::string("int16");
        case GDF_INT32:
            return std::string("int32");
        case GDF_INT64:
            return std::string("int64");
        default:
            return "long";
    }
}

/**---------------------------------------------------------------------------*
 * @brief Read in a CSV file, extract all fields and return
 * a GDF (array of gdf_columns)
 *
 * Pipeline: copy arguments -> map/uncompress input -> count and locate
 * records -> upload the needed byte range to the GPU -> resolve column
 * names/filters -> detect or parse dtypes -> allocate output columns ->
 * run the conversion kernel -> build NVStrings columns and null counts.
 *
 * NOTE(review): the early `return error;` paths (and the returns hidden in
 * checkError) leave raw_csv, the mmap, and prior device allocations
 * unreleased — a leak on error paths worth a follow-up.
 *
 * @param[in,out] args Structure containing both the the input arguments
 * and the returned data
 *
 * @return gdf_error
 *---------------------------------------------------------------------------**/
gdf_error read_csv(csv_read_arg *args)
{
    gdf_error error = gdf_error::GDF_SUCCESS;

    //-----------------------------------------------------------------------------
    // create the CSV data structure - this will be filled in as the CSV data is processed.
    // Done first to validate data types
    raw_csv_t * raw_csv = new raw_csv_t;
    // error = parseArguments(args, raw_csv);
    raw_csv->num_actual_cols = args->num_cols;
    raw_csv->num_active_cols = args->num_cols;
    raw_csv->num_records = 0;

    if(args->delim_whitespace == true) {
        raw_csv->delimiter = ' ';
    } else {
        raw_csv->delimiter = args->delimiter;
    }

    if(args->windowslinetermination) {
        raw_csv->terminator = '\n';
    } else {
        raw_csv->terminator = args->lineterminator;
    }

    raw_csv->quotechar = args->quotechar;
    if(raw_csv->quotechar != '\0') {
        raw_csv->keepquotes = !args->quoting;
        raw_csv->doublequote = args->doublequote;
    } else {
        raw_csv->keepquotes = true;
        raw_csv->doublequote = false;
    }

    // An explicit name list overrides any header row in the file.
    if (args->names == nullptr) {
        raw_csv->header_row = args->header;
    }
    else{
        raw_csv->header_row = -1;
    }

    raw_csv->dayfirst = args->dayfirst;
    raw_csv->decimal = args->decimal;
    raw_csv->thousands = args->thousands;
    raw_csv->skiprows = args->skiprows;
    raw_csv->skipfooter = args->skipfooter;
    raw_csv->nrows = args->nrows;
    // The header occupies one of the uploaded rows, so read one extra.
    if (raw_csv->header_row >= 0 && args->nrows >= 0) {
        ++raw_csv->nrows;
    }

    if (raw_csv->decimal == raw_csv->delimiter) {
        checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter");
    }
    if (raw_csv->thousands == raw_csv->delimiter) {
        checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter");
    }

    // Handle user-defined booleans values, whereby field data is substituted
    // with true/false values; CUDF booleans are int types of 0 or 1
    // The true/false value strings are converted to integers which are used
    // by the data conversion kernel for comparison and value replacement
    if ((args->true_values != NULL) && (args->num_true_values > 0)) {
        thrust::host_vector<int32_t> h_values(args->num_true_values);
        for (int i = 0; i < args->num_true_values; ++i) {
            h_values[i] = convertStrtoInt<int32_t>(args->true_values[i], 0, strlen(args->true_values[i]) - 1);
        }
        raw_csv->d_trueValues = h_values;
    }
    if ((args->false_values != NULL) && (args->num_false_values > 0)) {
        thrust::host_vector<int32_t> h_values(args->num_false_values);
        for (int i = 0; i < args->num_false_values; ++i) {
            h_values[i] = convertStrtoInt<int32_t>(args->false_values[i], 0, strlen(args->false_values[i]) - 1);
        }
        raw_csv->d_falseValues = h_values;
    }

    //-----------------------------------------------------------------------------
    // memory map in the data
    void * map_data = NULL;
    size_t map_size = 0;
    int fd = 0;
    if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
    {
        fd = open(args->filepath_or_buffer, O_RDONLY );
        if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }

        struct stat st{};
        if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }

        map_size = st.st_size;
        raw_csv->num_bytes = map_size;

        map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (map_data == MAP_FAILED || map_size==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
    }
    else if (args->input_data_form == gdf_csv_input_form::HOST_BUFFER)
    {
        map_data = (void *)args->filepath_or_buffer;
        raw_csv->num_bytes = map_size = args->buffer_size;
    }
    else { checkError(GDF_C_ERROR, "invalid input type"); }

    string compression_type;
    error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type);
    checkError(error, "call to inferCompressionType");

    const char* h_uncomp_data;
    size_t h_uncomp_size = 0;
    // Used when the input data is compressed, to ensure the allocated uncompressed data is freed
    vector<char> h_uncomp_data_owner;
    if (compression_type == "none") {
        // Do not use the owner vector here to avoid copying the whole file to the heap
        h_uncomp_data = (const char*)map_data;
        h_uncomp_size = map_size;
    }
    else {
        error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner);
        checkError(error, "call to getUncompressedHostData");
        h_uncomp_data = h_uncomp_data_owner.data();
        h_uncomp_size = h_uncomp_data_owner.size();
    }
    assert(h_uncomp_data != nullptr);
    assert(h_uncomp_size != 0);

    error = launch_countRecords(h_uncomp_data, h_uncomp_size, raw_csv->terminator, raw_csv->quotechar, raw_csv->num_records);
    if (error != GDF_SUCCESS) {
        return error;
    }

    //-----------------------------------------------------------------------------
    //-- Allocate space to hold the record starting point
    RMM_TRY( RMM_ALLOC((void**)&(raw_csv->recStart), (sizeof(cu_recstart_t) * (raw_csv->num_records + 1)), 0) );

    //-----------------------------------------------------------------------------
    //-- Scan data and set the starting positions
    error = launch_storeRecordStart(h_uncomp_data, h_uncomp_size, raw_csv);
    checkError(error, "call to record initial position store");

    // Previous kernel stores the record positions as encountered by all threads
    // Sort the record positions as subsequent processing may require filtering
    // certain rows or other processing on specific records
    thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);

    // Currently, ignoring lineterminations within quotes is handled by recording
    // the records of both, and then filtering out the records that is a quotechar
    // or a linetermination within a quotechar pair. The future major refactoring
    // of csv_reader and its kernels will probably use a different tactic.
    if (raw_csv->quotechar != '\0') {
        vector<cu_recstart_t> h_rec_starts(raw_csv->num_records + 1);
        const size_t rec_start_size = sizeof(cu_recstart_t) * (h_rec_starts.size());
        CUDA_TRY( cudaMemcpy(h_rec_starts.data(), raw_csv->recStart, rec_start_size, cudaMemcpyDeviceToHost) );

        auto recCount = raw_csv->num_records;

        bool quotation = false;
        for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) {
            if (h_uncomp_data[h_rec_starts[i] - 1] == raw_csv->quotechar) {
                quotation = !quotation;
                // Push dropped entries past the end of the data so the sort
                // below moves them to the tail of the array.
                h_rec_starts[i] = raw_csv->num_bytes;
                recCount--;
            }
            else if (quotation) {
                h_rec_starts[i] = raw_csv->num_bytes;
                recCount--;
            }
        }

        CUDA_TRY( cudaMemcpy(raw_csv->recStart, h_rec_starts.data(), rec_start_size, cudaMemcpyHostToDevice) );
        thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
        raw_csv->num_records = recCount;
    }

    error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, raw_csv);
    if (error != GDF_SUCCESS) {
        return error;
    }

    //-----------------------------------------------------------------------------
    //-- Populate the header

    // Check if the user gave us a list of column names
    if(args->names == nullptr) {

        int h_num_cols = 0;
        // Getting the first row of data from the file. We will parse the data to find lineterminator as
        // well as the column delimiter.
        cu_recstart_t second_rec_start;
        CUDA_TRY(cudaMemcpy(&second_rec_start, raw_csv->recStart + 1, sizeof(cu_recstart_t), cudaMemcpyDefault));
        vector<char> first_row(second_rec_start);
        CUDA_TRY(cudaMemcpy(first_row.data(), raw_csv->data, sizeof(char) * first_row.size(), cudaMemcpyDefault));

        // detect the number of rows and assign the column name
        raw_csv->col_names.clear();
        if(raw_csv->header_row >= 0) {
            size_t prev = 0;
            size_t c = 0;
            // Storing the names of the columns into a vector of strings
            while(c < first_row.size()) {
                if (first_row[c] == args->delimiter || first_row[c] == args->lineterminator){
                    std::string colName(first_row.data() + prev, c - prev );
                    prev = c + 1;
                    raw_csv->col_names.push_back(colName);
                    h_num_cols++;
                }
                c++;
            }
        }
        else {
            size_t c = 0;
            while(c < first_row.size()) {
                if (first_row[c] == args->lineterminator) {
                    h_num_cols++;
                    break;
                }
                else if(first_row[c] == '\r' && (c+1L) < first_row.size() && first_row[c+1] == '\n'){
                    h_num_cols++;
                    break;
                }else if (first_row[c] == args->delimiter)
                    h_num_cols++;
                c++;
            }
            // assign column indexes as names if the header column is not present
            for (int i = 0; i<h_num_cols; i++) {
                std::string newColName = std::to_string(i);
                raw_csv->col_names.push_back(newColName);
            }
        }

        // Allocating a boolean array that will use to state if a column needs to read or filtered.
        raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols));
        RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) );
        for (int i = 0; i<h_num_cols; i++)
            raw_csv->h_parseCol[i]=true;

        int h_dup_cols_removed = 0;
        // Looking for duplicates
        for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
            bool found_dupe = false;
            for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
                if (*it==*it2){
                    found_dupe=true;
                    break;
                }
            }
            if(found_dupe){
                int count=1;
                for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
                    if (*it==*it2){
                        if(args->mangle_dupe_cols){
                            // Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
                            std::string newColName = *it2;
                            newColName += "." + std::to_string(count);
                            count++;
                            *it2 = newColName;
                        } else{
                            // All duplicate fields will be ignored.
                            int pos=std::distance(raw_csv->col_names.begin(), it2);
                            raw_csv->h_parseCol[pos]=false;
                            h_dup_cols_removed++;
                        }
                    }
                }
            }
        }

        raw_csv->num_actual_cols = h_num_cols;                      // Actual number of columns in the CSV file
        raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed;   // Number of fields that need to be processed based on duplicatation fields

        CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), cudaMemcpyHostToDevice));
    }
    else {
        raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols));
        RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) );

        for (int i = 0; i<raw_csv->num_actual_cols; i++){
            raw_csv->h_parseCol[i]=true;
            std::string col_name = args->names[i];
            raw_csv->col_names.push_back(col_name);
        }
        CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), cudaMemcpyHostToDevice));
    }

    // User can give a subset of columns to read, by index or by name.
    if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
        if(args->use_cols_int!=NULL){
            for (int i = 0; i<raw_csv->num_actual_cols; i++)
                raw_csv->h_parseCol[i]=false;
            for(int i=0; i < args->use_cols_int_len; i++){
                int pos = args->use_cols_int[i];
                raw_csv->h_parseCol[pos]=true;
            }
            raw_csv->num_active_cols = args->use_cols_int_len;
        }else{
            for (int i = 0; i<raw_csv->num_actual_cols; i++)
                raw_csv->h_parseCol[i]=false;
            int countFound=0;
            for(int i=0; i < args->use_cols_char_len; i++){
                std::string colName(args->use_cols_char[i]);
                for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
                    if(colName==*it){
                        countFound++;
                        int pos=std::distance(raw_csv->col_names.begin(), it);
                        raw_csv->h_parseCol[pos]=true;
                        break;
                    }
                }
            }
            raw_csv->num_active_cols = countFound;
        }
        CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), cudaMemcpyHostToDevice));
    }

    // The header row was uploaded but is not part of the returned data.
    if (raw_csv->header_row>=0) {
        raw_csv->num_records-=1;
    }

    //-----------------------------------------------------------------------------
    //--- done with host data
    if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
    {
        close(fd);
        munmap(map_data, map_size);
    }

    //-----------------------------------------------------------------------------
    //--- Auto detect types of the vectors
    if(args->dtype==NULL){
        if (raw_csv->num_records == 0) {
            checkError(GDF_INVALID_API_CALL, "read_csv: no data available for data type inference");
        }

        column_data_t *d_ColumnData,*h_ColumnData;
        h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols));
        RMM_TRY( RMM_ALLOC((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) );
        CUDA_TRY( cudaMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) );

        // NOTE(review): the gdf_error returned here is ignored — worth checking.
        launch_dataTypeDetection(raw_csv, d_ColumnData);
        CUDA_TRY( cudaMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost));

        vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end)
        raw_csv->dtypes.clear();

        for(int col = 0; col < raw_csv->num_active_cols; col++){
            unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
                                          h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;

            if (h_ColumnData[col].countNULL == raw_csv->num_records){
                d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
            } else if(h_ColumnData[col].countString>0L){
                d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings.
            } else if(h_ColumnData[col].countDateAndTime>0L){
                d_detectedTypes.push_back(GDF_DATE64);
            } else if(h_ColumnData[col].countFloat > 0L ||
                      (h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
                // The second condition has been added to conform to PANDAS which states that a colum of
                // integers with a single NULL record need to be treated as floats.
                d_detectedTypes.push_back(GDF_FLOAT64);
            } else {
                d_detectedTypes.push_back(GDF_INT64);
            }
        }

        raw_csv->dtypes=d_detectedTypes;

        free(h_ColumnData);
        RMM_TRY( RMM_FREE( d_ColumnData, 0 ) );
    }
    else{
        for ( int x = 0; x < raw_csv->num_actual_cols; x++) {
            std::string temp_type = args->dtype[x];
            gdf_dtype col_dtype = convertStringToDtype( temp_type );

            if (col_dtype == GDF_invalid)
                return GDF_UNSUPPORTED_DTYPE;

            raw_csv->dtypes.push_back(col_dtype);
        }
    }

    //-----------------------------------------------------------------------------
    //--- allocate space for the results
    gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols);

    void **d_data,**h_data;
    gdf_valid_type **d_valid,**h_valid;
    unsigned long long *d_valid_count;
    gdf_dtype *d_dtypes,*h_dtypes;

    h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols));
    h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols));
    h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols));

    RMM_TRY( RMM_ALLOC((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) );
    RMM_TRY( RMM_ALLOC((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) );
    RMM_TRY( RMM_ALLOC((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) );
    RMM_TRY( RMM_ALLOC((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) );
    CUDA_TRY( cudaMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) );

    int stringColCount=0;
    for (int col = 0; col < raw_csv->num_active_cols; col++) {
        if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING)
            stringColCount++;
    }

    string_pair **h_str_cols = NULL, **d_str_cols = NULL;
    if (stringColCount > 0 ) {
        h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount));
        RMM_TRY( RMM_ALLOC((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) );

        for (int col = 0; col < stringColCount; col++) {
            RMM_TRY( RMM_ALLOC((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) );
        }

        CUDA_TRY(cudaMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, cudaMemcpyHostToDevice));
    }

    // acol walks all file columns; col walks only the active (parsed) ones.
    for (int acol = 0,col=-1; acol < raw_csv->num_actual_cols; acol++) {
        if(raw_csv->h_parseCol[acol]==false)
            continue;
        col++;

        gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1);

        gdf->size = raw_csv->num_records;
        gdf->dtype = raw_csv->dtypes[col];
        gdf->null_count = 0; // will be filled in later

        //--- column name
        std::string str = raw_csv->col_names[acol];
        int len = str.length() + 1;
        gdf->col_name = (char *)malloc(sizeof(char) * len);
        memcpy(gdf->col_name, str.c_str(), len);
        gdf->col_name[len -1] = '\0';

        allocateGdfDataSpace(gdf);

        cols[col] = gdf;
        h_dtypes[col] = gdf->dtype;
        h_data[col] = gdf->data;
        h_valid[col] = gdf->valid;
    }

    CUDA_TRY( cudaMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
    CUDA_TRY( cudaMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
    CUDA_TRY( cudaMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));

    free(h_dtypes);
    free(h_valid);
    free(h_data);

    if (raw_csv->num_records != 0) {
        error = launch_dataConvertColumns(raw_csv, d_data, d_valid, d_dtypes, d_str_cols, d_valid_count);
        if (error != GDF_SUCCESS) {
            return error;
        }
        // Sync with the default stream, just in case create_from_index() is asynchronous
        cudaStreamSynchronize(0);

        stringColCount=0;
        for (int col = 0; col < raw_csv->num_active_cols; col++) {

            gdf_column *gdf = cols[col];

            if (gdf->dtype != gdf_dtype::GDF_STRING)
                continue;

            NVStrings* const stringCol = NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records));
            if ((raw_csv->quotechar != '\0') && (raw_csv->doublequote==true)) {
                // In PANDAS, default of enabling doublequote for two consecutive
                // quotechar in quote fields results in reduction to single
                const string quotechar(1, raw_csv->quotechar);
                const string doublequotechar(2, raw_csv->quotechar);
                gdf->data = stringCol->replace(doublequotechar.c_str(), quotechar.c_str());
                NVStrings::destroy(stringCol);
            }
            else {
                gdf->data = stringCol;
            }

            RMM_TRY( RMM_FREE( h_str_cols [stringColCount], 0 ) );

            stringColCount++;
        }

        vector<unsigned long long> h_valid_count(raw_csv->num_active_cols);
        CUDA_TRY( cudaMemcpy(h_valid_count.data(), d_valid_count, sizeof(unsigned long long) * h_valid_count.size(), cudaMemcpyDeviceToHost));

        //--- set the null count
        for (size_t col = 0; col < h_valid_count.size(); col++) {
            cols[col]->null_count = raw_csv->num_records - h_valid_count[col];
        }
    }

    // free up space that is no longer needed
    if (h_str_cols != NULL)
        free ( h_str_cols);

    free(raw_csv->h_parseCol);

    if (d_str_cols != NULL)
        RMM_TRY( RMM_FREE( d_str_cols, 0 ) );

    RMM_TRY( RMM_FREE( d_valid, 0 ) );
    RMM_TRY( RMM_FREE( d_valid_count, 0 ) );
    RMM_TRY( RMM_FREE( d_dtypes, 0 ) );
    RMM_TRY( RMM_FREE( d_data, 0 ) );

    RMM_TRY( RMM_FREE( raw_csv->recStart, 0 ) );
    RMM_TRY( RMM_FREE( raw_csv->d_parseCol, 0 ) );
    RMM_TRY( RMM_FREE ( raw_csv->data, 0) );

    args->data = cols;
    args->num_cols_out = raw_csv->num_active_cols;
    args->num_rows_out = raw_csv->num_records;

    delete raw_csv;
    return error;
}

/*
 * What is passed in is the data type as a string, need to convert that into gdf_dtype enum
 * (the if-chain continues on the next chunk line)
 */
gdf_dtype convertStringToDtype(std::string &dtype) {

    if (dtype.compare( "str") == 0) return GDF_STRING;
    if (dtype.compare( "date") == 0) return GDF_DATE64;
    if (dtype.compare( "date32") == 0) return GDF_DATE32;
    if (dtype.compare( "date64") == 0) return GDF_DATE64;
    if
    // (continuation of the `if` that ends the previous chunk line)
    (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
    if (dtype.compare( "category") == 0) return GDF_CATEGORY;
    if (dtype.compare( "float") == 0) return GDF_FLOAT32;
    if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
    if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
    if (dtype.compare( "double") == 0) return GDF_FLOAT64;
    if (dtype.compare( "short") == 0) return GDF_INT16;
    if (dtype.compare( "int") == 0) return GDF_INT32;
    if (dtype.compare( "int32") == 0) return GDF_INT32;
    if (dtype.compare( "int64") == 0) return GDF_INT64;
    if (dtype.compare( "long") == 0) return GDF_INT64;

    return GDF_invalid;
}

/**---------------------------------------------------------------------------*
 * @brief Infer the compression type from the compression parameter and
 * the input file name
 *
 * Returns "none" if the input is not compressed.
 *
 * @param[in] compression_arg Input string that is potentially describing
 * the compression type. Can also be nullptr, "none", or "infer"
 * @param[in] filepath path + name of the input file
 * @param[out] compression_type String describing the inferred compression type
 *
 * @return gdf_error with error code on failure, otherwise GDF_SUCCESS
 *---------------------------------------------------------------------------**/
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type)
{
    // "none" and nullptr are equivalent: no decompression.
    if (compression_arg && 0 == strcasecmp(compression_arg, "none")) {
        compression_arg = nullptr;
    }
    // "infer": decide from the file extension; unknown extensions fall through
    // to "none".
    if (compression_arg && 0 == strcasecmp(compression_arg, "infer"))
    {
        const char *file_ext = strrchr(filepath, '.');
        compression_arg = nullptr;
        if (file_ext)
        {
            if (!strcasecmp(file_ext, ".gz"))
                compression_arg = "gzip";
            else if (!strcasecmp(file_ext, ".zip"))
                compression_arg = "zip";
            else if (!strcasecmp(file_ext, ".bz2"))
                compression_arg = "bz2";
            else if (!strcasecmp(file_ext, ".xz"))
                compression_arg = "xz";
            else {
                // TODO: return error here
            }
        }
    }
    compression_type = compression_arg == nullptr? "none":string(compression_arg);

    return GDF_SUCCESS;
}

/**---------------------------------------------------------------------------*
 * @brief Uncompresses the input data and stores the allocated result into
 * a vector.
 *
 * @param[in] h_data Pointer to the csv data in host memory
 * @param[in] num_bytes Size of the input data, in bytes
 * @param[in] compression String describing the compression type
 * @param[out] h_uncomp_data Vector containing the output uncompressed data
 *
 * @return gdf_error with error code on failure, otherwise GDF_SUCCESS
 *---------------------------------------------------------------------------**/
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data)
{
    int comp_type = IO_UNCOMP_STREAM_TYPE_INFER;
    if (compression == "gzip")
        comp_type = IO_UNCOMP_STREAM_TYPE_GZIP;
    else if (compression == "zip")
        comp_type = IO_UNCOMP_STREAM_TYPE_ZIP;
    else if (compression == "bz2")
        comp_type = IO_UNCOMP_STREAM_TYPE_BZIP2;
    else if (compression == "xz")
        comp_type = IO_UNCOMP_STREAM_TYPE_XZ;

    return io_uncompress_single_h2d(h_data, num_bytes, comp_type, h_uncomp_data);
}

/**---------------------------------------------------------------------------*
 * @brief Uploads the relevant segment of the input csv data onto the GPU.
 *
 * Only rows that need to be read are copied to the GPU, based on parameters
 * like nrows, skipheader, skipfooter.
 * Also updates the array of record starts to match the device data offset.
 *
 * @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory
 * @param[in] h_uncomp_size Size of the input data, in bytes
 * @param[in,out] csvData Structure containing the csv parsing parameters
 * and intermediate results
 *
 * @return gdf_error with error code on failure, otherwise GDF_SUCCESS
 *---------------------------------------------------------------------------**/
gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv)
{
    vector<cu_recstart_t> h_rec_starts(raw_csv->num_records + 1);
    CUDA_TRY( cudaMemcpy(h_rec_starts.data(), raw_csv->recStart, sizeof(cu_recstart_t) * h_rec_starts.size(), cudaMemcpyDefault));

    // Exclude the rows user chose to skip at the start of the file
    const gdf_size_type first_row = raw_csv->skiprows + max(raw_csv->header_row, 0l);
    if (raw_csv->num_records > first_row) {
        raw_csv->num_records = raw_csv->num_records - (long)first_row;
    }
    else {
        checkError(GDF_FILE_ERROR, "Number of records is too small for the specified skiprows and header parameters");
    }

    // Restrict the rows to nrows if nrows is smaller than the remaining number of rows
    if (raw_csv->nrows >= 0 && (gdf_size_type)raw_csv->nrows < raw_csv->num_records) {
        raw_csv->num_records = (gdf_size_type)raw_csv->nrows;
    }

    // Exclude the rows user chose to skip at the end of the file
    if (raw_csv->skipfooter != 0) {
        raw_csv->num_records = gdf_size_type(max(raw_csv->num_records - raw_csv->skipfooter, gdf_size_type{0}));
    }

    // Have to at least read the header row
    if (raw_csv->header_row >= 0 && raw_csv->num_records == 0)
        raw_csv->num_records = 1;

    // If specified, header row will always be the first row in the GPU data
    raw_csv->header_row = min(raw_csv->header_row, 0l);

    // Byte range [start_offset, end_offset] covering exactly the kept records.
    const auto start_offset = h_rec_starts[first_row];
    const auto end_offset = h_rec_starts[first_row + raw_csv->num_records] - 1;
    raw_csv->num_bytes = end_offset - start_offset + 1;
    assert(raw_csv->num_bytes <= h_uncomp_size);
    raw_csv->num_bits = (raw_csv->num_bytes + 63) / 64;

    // Update the record starts to match the device data (skip missing records, fix offset)
    for (gdf_size_type i = first_row; i <= first_row + raw_csv->num_records; ++i)
        h_rec_starts[i] -= start_offset;
    RMM_TRY(RMM_REALLOC(&raw_csv->recStart, sizeof(cu_recstart_t) * (raw_csv->num_records + 1), 0));
    CUDA_TRY( cudaMemcpy(raw_csv->recStart, h_rec_starts.data() + first_row, sizeof(cu_recstart_t) * (raw_csv->num_records + 1), cudaMemcpyDefault));

    // Allocate and copy to the GPU
    RMM_TRY(RMM_ALLOC ((void**)&raw_csv->data, (sizeof(char) * raw_csv->num_bytes), 0));
    CUDA_TRY(cudaMemcpy(raw_csv->data, h_uncomp_data + start_offset, raw_csv->num_bytes, cudaMemcpyHostToDevice));

    return GDF_SUCCESS;
}

/*
 * For each of the gdf_cvolumns, create the on-device space. the on-host fields should already be filled in
 * (the switch continues on the next chunk line)
 */
gdf_error allocateGdfDataSpace(gdf_column *gdf)
{
    long N = gdf->size;
    // NOTE(review): (N + 7) / 8 bytes would hold the bits; the extra ~3 bytes
    // appear to pad for validAtomicOR's aligned 4-byte atomic write on the
    // last bitmap byte — confirm before tightening this allocation.
    long num_bitmaps = (N + 31) / 8; // 8 bytes per bitmap

    //--- allocate space for the valid bitmaps
    RMM_TRY( RMM_ALLOC((void**)&gdf->valid, (sizeof(gdf_valid_type) * num_bitmaps), 0) );
    CUDA_TRY(cudaMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) );

    int elementSize=0;
    //--- Allocate space for the data
    switch(gdf->dtype) {
        case gdf_dtype::GDF_INT8:
            elementSize = sizeof(int8_t);
            break;
        case gdf_dtype::GDF_INT16:
            elementSize = sizeof(int16_t);
            break;
        case gdf_dtype::GDF_INT32:
            elementSize = sizeof(int32_t);
            break;
        case gdf_dtype::GDF_INT64:
            elementSize = sizeof(int64_t);
            break;
        case gdf_dtype::GDF_FLOAT32:
            elementSize = sizeof(float);
            break;
        case gdf_dtype::GDF_FLOAT64:
            elementSize = sizeof(double);
            break;
        case gdf_dtype::GDF_DATE32:
            elementSize = sizeof(gdf_date32);
            break;
        case gdf_dtype::GDF_DATE64:
            elementSize = sizeof(gdf_date64);
            break;
        case gdf_dtype::GDF_TIMESTAMP:
            elementSize = sizeof(int64_t);
            break;
        case gdf_dtype::GDF_CATEGORY:
            elementSize = sizeof(gdf_category);
            break;
        case gdf_dtype::GDF_STRING:
            return gdf_error::GDF_SUCCESS; // Memory for gdf->data allocated by string class
eventually default: return GDF_UNSUPPORTED_DTYPE; } RMM_TRY( RMM_ALLOC((void**)&gdf->data, elementSize * N, 0) ); return gdf_error::GDF_SUCCESS; } //---------------------------------------------------------------------------------------------------------------- // CUDA Kernels //---------------------------------------------------------------------------------------------------------------- /**---------------------------------------------------------------------------* * @brief Counts the number of rows in the input csv file. * * Does not load the entire file into the GPU memory at any time, so it can * be used to parse large files. * Does not take quotes into consideration, so it will return extra rows * if the line terminating characters are present within quotes. * Because of this the result should be postprocessed to remove * the fake line endings. * * @param[in] h_data Pointer to the csv data in host memory * @param[in] h_size Size of the input data, in bytes * @param[in] terminator Line terminator character * @param[in] quote Quote character * @param[out] rec_cnt The resulting number of rows (records) * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error launch_countRecords(const char* h_data, size_t h_size, char terminator, char quote, gdf_size_type& rec_cnt) { const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; vector<cu_reccnt_t> h_cnts(chunk_count); cu_reccnt_t* d_cnts = nullptr; RMM_TRY(RMM_ALLOC (&d_cnts, sizeof(cu_reccnt_t)* chunk_count, 0)); CUDA_TRY(cudaMemset(d_cnts, 0, sizeof(cu_reccnt_t)* chunk_count)); char* d_chunk = nullptr; // Allocate extra byte in case \r\n is at the chunk border RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes + 1, 0)); int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, 
countRecords)); for (size_t ci = 0; ci < chunk_count; ++ci) { const auto h_chunk = h_data + ci * max_chunk_bytes; const auto chunk_bytes = std::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); const auto chunk_bits = (chunk_bytes + 63) / 64; // Copy chunk to device. Copy extra byte if not last chunk CUDA_TRY(cudaMemcpy(d_chunk, h_chunk, ci < (chunk_count - 1)?chunk_bytes:chunk_bytes + 1, cudaMemcpyDefault)); const int gridSize = (chunk_bits + blockSize - 1) / blockSize; countRecords <<< gridSize, blockSize >>> ( d_chunk, terminator, quote, chunk_bytes, chunk_bits, &d_cnts[ci] ); } CUDA_TRY(cudaMemcpy(h_cnts.data(), d_cnts, chunk_count*sizeof(cu_reccnt_t), cudaMemcpyDefault)); RMM_TRY( RMM_FREE(d_chunk, 0) ); RMM_TRY( RMM_FREE(d_cnts, 0) ); CUDA_TRY(cudaGetLastError()); rec_cnt = std::accumulate(h_cnts.begin(), h_cnts.end(), gdf_size_type(0)); return GDF_SUCCESS; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that counts the number of rows in the given * file segment, based on the location of line terminators. * * @param[in] data Device memory pointer to the csv data, * potentially a chunk of the whole file * @param[in] terminator Line terminator character * @param[in] quotechar Quote character * @param[in] num_bytes Number of bytes in the input data * @param[in] num_bits Number of 'bits' in the input data. 
Each 'bit' is * processed by a separate CUDA thread * @param[in,out] num_records Device memory pointer to the number of found rows * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ __global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records) { // thread IDs range per block, so also need the block id long tid = threadIdx.x + (blockDim.x * blockIdx.x); if (tid >= num_bits) return; // data ID is a multiple of 64 long did = tid * 64L; char *raw = (data + did); long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); // process the data cu_reccnt_t tokenCount = 0; for (long x = 0; x < byteToProcess; x++) { // Scan and log records. If quotations are enabled, then also log quotes // for a postprocess ignore, as the chunk here has limited visibility. if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { tokenCount++; } else if (raw[x] == '\r' && raw[x +1] == '\n') { x++; tokenCount++; } } atomicAdd(num_records, tokenCount); } /**---------------------------------------------------------------------------* * @brief Finds the start of each row (record) in the given file, based on * the location of line terminators. The offset of each found row is stored * in the recStart data member of the csvData parameter. * * Does not load the entire file into the GPU memory at any time, so it can * be used to parse large files. * Does not take quotes into consideration, so it will return extra rows * if the line terminating characters are present within quotes. * Because of this the result should be postprocessed to remove * the fake line endings. 
* * @param[in] h_data Pointer to the csv data in host memory * @param[in] h_size Size of the input data, in bytes * @param[in,out] csvData Structure containing the csv parsing parameters * and intermediate results * * @return gdf_error with error code on failure, otherwise GDF_SUCCESS *---------------------------------------------------------------------------**/ gdf_error launch_storeRecordStart(const char* h_data, size_t h_size, raw_csv_t * csvData) { char* d_chunk = nullptr; // Allocate extra byte in case \r\n is at the chunk border RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes + 1, 0)); cu_reccnt_t* d_num_records; RMM_TRY(RMM_ALLOC((void**)&d_num_records, sizeof(cu_reccnt_t), 0) ); // set the first record starting a zero instead of setting it in the kernel const auto one = 1ull; CUDA_TRY(cudaMemcpy(d_num_records, &one, sizeof(cu_reccnt_t), cudaMemcpyDefault)); CUDA_TRY(cudaMemset(csvData->recStart, 0ull, (sizeof(cu_recstart_t)))); int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, storeRecordStart) ); const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; for (size_t ci = 0; ci < chunk_count; ++ci) { const auto chunk_offset = ci * max_chunk_bytes; const auto h_chunk = h_data + chunk_offset; const auto chunk_bytes = std::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); const auto chunk_bits = (chunk_bytes + 63) / 64; // Copy chunk to device. 
Copy extra byte if not last chunk CUDA_TRY(cudaMemcpy(d_chunk, h_chunk, ci < (chunk_count - 1)?chunk_bytes:chunk_bytes + 1, cudaMemcpyDefault)); const int gridSize = (chunk_bits + blockSize - 1) / blockSize; storeRecordStart <<< gridSize, blockSize >>> ( d_chunk, chunk_offset, csvData->terminator, csvData->quotechar, chunk_bytes, chunk_bits, d_num_records, csvData->recStart ); } RMM_TRY( RMM_FREE( d_num_records, 0 ) ); RMM_TRY( RMM_FREE( d_chunk, 0 ) ); CUDA_TRY( cudaGetLastError() ); return GDF_SUCCESS; } /**---------------------------------------------------------------------------* * @brief CUDA kernel that finds the start of each row (record) in the given * file segment, based on the location of line terminators. * * The offset of each found row is stored in a device memory array. * The kernel operate on a segment (chunk) of the csv file. * * @param[in] data Device memory pointer to the csv data, * potentially a chunk of the whole file * @param[in] chunk_offset Offset of the data pointer from the start of the file * @param[in] terminator Line terminator character * @param[in] quotechar Quote character * @param[in] num_bytes Number of bytes in the input data * @param[in] num_bits Number of 'bits' in the input data. 
Each 'bit' is * processed by a separate CUDA thread * @param[in,out] num_records Device memory pointer to the number of found rows * @param[out] recStart device memory array containing the offset of each record * * @return void *---------------------------------------------------------------------------**/ __global__ void storeRecordStart(char *data, size_t chunk_offset, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records, cu_recstart_t* recStart) { // thread IDs range per block, so also need the block id long tid = threadIdx.x + (blockDim.x * blockIdx.x); if ( tid >= num_bits) return; // data ID - multiple of 64 long did = tid * 64L; char *raw = (data + did); long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); // process the data for (long x = 0; x < byteToProcess; x++) { // Scan and log records. If quotations are enabled, then also log quotes // for a postprocess ignore, as the chunk here has limited visibility. if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { const auto pos = atomicAdd(num_records, 1ull); recStart[pos] = did + chunk_offset + x + 1; } else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') { x++; const auto pos = atomicAdd(num_records, 1ull); recStart[pos] = did + chunk_offset + x + 1; } } } //---------------------------------------------------------------------------------------------------------------- gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, unsigned long long *num_valid) { int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, convertCsvToGdf) ); // Calculate actual block count to use based on records count int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; parsing_opts_t opts; opts.delimiter = raw_csv->delimiter; 
opts.terminator = raw_csv->terminator; opts.quotechar = raw_csv->quotechar; opts.keepquotes = raw_csv->keepquotes; opts.decimal = raw_csv->decimal; opts.thousands = raw_csv->thousands; opts.trueValues = thrust::raw_pointer_cast(raw_csv->d_trueValues.data()); opts.trueValuesCount = raw_csv->d_trueValues.size(); opts.falseValues = thrust::raw_pointer_cast(raw_csv->d_falseValues.data()); opts.falseValuesCount = raw_csv->d_falseValues.size(); auto first_data_rec_start = raw_csv->recStart; if (raw_csv->header_row >= 0) { // skip the header row if present ++first_data_rec_start; } convertCsvToGdf <<< gridSize, blockSize >>>( raw_csv->data, opts, raw_csv->num_records, raw_csv->num_actual_cols, raw_csv->d_parseCol, first_data_rec_start, d_dtypes, gdf, valid, str_cols, raw_csv->dayfirst, num_valid ); CUDA_TRY( cudaGetLastError() ); return GDF_SUCCESS; } /* * Data is processed in one row\record at a time - so the number of total threads (tid) is equal to the number of rows. * */ __global__ void convertCsvToGdf( char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, gdf_dtype *dtype, void **gdf_data, gdf_valid_type **valid, string_pair **str_cols, bool dayfirst, unsigned long long *num_valid ) { // thread IDs range per block, so also need the block id long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array // we can have more threads than data, make sure we are not past the end of the data if ( rec_id >= num_records) return; long start = recStart[rec_id]; long stop = recStart[rec_id + 1]; long pos = start; int col = 0; int actual_col = 0; int stringCol = 0; bool quotation = false; while(col<num_columns){ if(start>stop) break; while(true){ // Use simple logic to ignore control chars between any quote seq // Handles nominal cases including doublequotes within quotes, but // may not output exact failures as PANDAS for 
malformed fields if(raw_csv[pos] == opts.quotechar){ quotation = !quotation; } else if(quotation==false){ if(raw_csv[pos] == opts.delimiter){ break; } else if(raw_csv[pos] == opts.terminator){ break; } else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){ stop--; break; } } if(pos>=stop) break; pos++; } if(parseCol[col]==true){ long tempPos=pos-1; // Modify start & end to ignore whitespace and quotechars if(dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){ adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar); } if(start<=(tempPos)) { // Empty strings are not legal values switch(dtype[actual_col]) { case gdf_dtype::GDF_INT8: { int8_t *gdf_out = (int8_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT16: { int16_t *gdf_out = (int16_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT32: { int32_t *gdf_out = (int32_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_INT64: { int64_t *gdf_out = (int64_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, 
tempPos, opts.thousands); if(isBooleanValue(gdf_out[rec_id], opts.trueValues, opts.trueValuesCount)==true){ gdf_out[rec_id] = 1; }else if(isBooleanValue(gdf_out[rec_id], opts.falseValues, opts.falseValuesCount)==true){ gdf_out[rec_id] = 0; } } break; case gdf_dtype::GDF_FLOAT32: { float *gdf_out = (float *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos, opts.decimal, opts.thousands); } break; case gdf_dtype::GDF_FLOAT64: { double *gdf_out = (double *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos, opts.decimal, opts.thousands); } break; case gdf_dtype::GDF_DATE32: { gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col]; gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst); } break; case gdf_dtype::GDF_DATE64: { gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col]; gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst); } break; case gdf_dtype::GDF_TIMESTAMP: { int64_t *gdf_out = (int64_t *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos, opts.thousands); } break; case gdf_dtype::GDF_CATEGORY: { gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col]; gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED); } break; case gdf_dtype::GDF_STRING: { long end = pos; if(opts.keepquotes==false){ if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){ start++; end--; } } str_cols[stringCol][rec_id].first = raw_csv+start; str_cols[stringCol][rec_id].second = size_t(end-start); stringCol++; } break; default: break; } // set the valid bitmap - all bits were set to 0 to start int bitmapIdx = whichBitmap(rec_id); // which bitmap int bitIdx = whichBit(rec_id); // which bit - over an 8-bit index setBit(valid[actual_col]+bitmapIdx, bitIdx); // This is done with atomics atomicAdd((unsigned long long int*)&num_valid[actual_col],(unsigned long long int)1); } else 
if(dtype[actual_col]==gdf_dtype::GDF_STRING){ str_cols[stringCol][rec_id].first = NULL; str_cols[stringCol][rec_id].second = 0; stringCol++; } actual_col++; } pos++; start=pos; col++; } } //---------------------------------------------------------------------------------------------------------------- gdf_error launch_dataTypeDetection( raw_csv_t * raw_csv, column_data_t* d_columnData) { int blockSize; // suggested thread count to use int minGridSize; // minimum block count required CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dataTypeDetection) ); // Calculate actual block count to use based on records count int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; parsing_opts_t opts; opts.delimiter = raw_csv->delimiter; opts.terminator = raw_csv->terminator; opts.quotechar = raw_csv->quotechar; opts.keepquotes = raw_csv->keepquotes; opts.trueValues = thrust::raw_pointer_cast(raw_csv->d_trueValues.data()); opts.trueValuesCount = raw_csv->d_trueValues.size(); opts.falseValues = thrust::raw_pointer_cast(raw_csv->d_falseValues.data()); opts.falseValuesCount = raw_csv->d_falseValues.size(); auto first_data_rec_start = raw_csv->recStart; if (raw_csv->header_row >= 0) { // skip the header row if present ++first_data_rec_start; } dataTypeDetection <<< gridSize, blockSize >>>( raw_csv->data, opts, raw_csv->num_records, raw_csv->num_actual_cols, raw_csv->d_parseCol, first_data_rec_start, d_columnData ); CUDA_TRY( cudaGetLastError() ); return GDF_SUCCESS; } /* */ __global__ void dataTypeDetection( char *raw_csv, const parsing_opts_t opts, gdf_size_type num_records, int num_columns, bool *parseCol, cu_recstart_t *recStart, column_data_t* d_columnData ) { // thread IDs range per block, so also need the block id long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array // we can have more threads than data, make sure we are not past the end of the data if ( 
rec_id >= num_records) return; long start = recStart[rec_id]; long stop = recStart[rec_id + 1]; long pos = start; int col = 0; int actual_col = 0; bool quotation = false; // Going through all the columns of a given record while(col<num_columns){ if(start>stop) break; // Finding the breaking point for each column while(true){ // Use simple logic to ignore control chars between any quote seq // Handles nominal cases including doublequotes within quotes, but // may not output exact failures as PANDAS for malformed fields if(raw_csv[pos] == opts.quotechar){ quotation = !quotation; } else if(quotation==false){ if(raw_csv[pos] == opts.delimiter){ break; } else if(raw_csv[pos] == opts.terminator){ break; } else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){ stop--; break; } } if(pos>=stop) break; pos++; } // Checking if this is a column that the user wants --- user can filter columns if(parseCol[col]==true){ long tempPos=pos-1; // Checking if the record is NULL if(start>(tempPos)){ atomicAdd(& d_columnData[actual_col].countNULL, 1L); pos++; start=pos; col++; actual_col++; continue; } long countNumber=0; long countDecimal=0; long countSlash=0; long countDash=0; long countColon=0; long countString=0; // Modify start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos); long strLen=tempPos-start+1; for(long startPos=start; startPos<=tempPos; startPos++){ if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){ countNumber++; continue; } // Looking for unique characters that will help identify column types. switch (raw_csv[startPos]){ case '.': countDecimal++;break; case '-': countDash++; break; case '/': countSlash++;break; case ':': countColon++;break; default: countString++; break; } } if(strLen==0){ // Removed spaces ' ' in the pre-processing and thus we can have an empty string. 
atomicAdd(& d_columnData[actual_col].countNULL, 1L); } // Integers have to have the length of the string or can be off by one if they start with a minus sign else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){ // Checking to see if we the integer value requires 8,16,32,64 bits. // This will allow us to allocate the exact amount of memory. int64_t value = convertStrtoInt<int64_t>(raw_csv, start, tempPos, opts.thousands); if (isBooleanValue<int32_t>(value, opts.trueValues, opts.trueValuesCount) || isBooleanValue<int32_t>(value, opts.falseValues, opts.falseValuesCount)){ atomicAdd(& d_columnData[actual_col].countInt8, 1L); } else if(value >= (1L<<31)){ atomicAdd(& d_columnData[actual_col].countInt64, 1L); } else if(value >= (1L<<15)){ atomicAdd(& d_columnData[actual_col].countInt32, 1L); } else if(value >= (1L<<7)){ atomicAdd(& d_columnData[actual_col].countInt16, 1L); } else{ atomicAdd(& d_columnData[actual_col].countInt8, 1L); } } // Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign. else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){ atomicAdd(& d_columnData[actual_col].countFloat, 1L); } // The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not // a data-time field. Also, if a string has multiple decimals, then is not a legit number. else if(countString > 3 || countDecimal > 1){ atomicAdd(& d_columnData[actual_col].countString, 1L); } else { // A date field can have either one or two '-' or '\'. A legal combination will only have one of them. // To simplify the process of auto column detection, we are not covering all the date-time formation permutations. 
if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){ if((countColon<=2)){ atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L); } else{ atomicAdd(& d_columnData[actual_col].countString, 1L); } } // Default field is string type. else{ atomicAdd(& d_columnData[actual_col].countString, 1L); } } actual_col++; } pos++; start=pos; col++; } } //---------------------------------------------------------------------------------------------------------------- /* * Return which bit is set * x is the occurrence: 1 = first, 2 = seconds, ... */ __device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) { int idx = tid; if ( x == 0 ) return -1; int withinBitCount = 0; int offset = 0; int found = 0; uint64_t bitmap = r_bits[idx]; while (found != x) { if(bitmap == 0) { idx++; if (idx >= num_bits) return -1; bitmap = r_bits[idx]; offset += 64; withinBitCount = 0; } if ( bitmap & 1 ) { found++; //found a set bit } bitmap >>= 1; ++withinBitCount; } offset += withinBitCount -1; return offset; }
69e61506c98cd9fb991d0a4a0231675a0fc39789.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeFacetedSphere.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeFacetedSphere template hipError_t gpu_hpmc_free_volume<ShapeFacetedSphere>(const hpmc_free_volume_args_t &args, const typename ShapeFacetedSphere::param_type *d_params); template hipError_t gpu_hpmc_update<ShapeFacetedSphere>(const hpmc_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeFacetedSphere>(const hpmc_implicit_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject<ShapeFacetedSphere>(const hpmc_implicit_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template hipError_t gpu_hpmc_insert_depletants_queue<ShapeFacetedSphere>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedSphere::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeFacetedSphere>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedSphere::param_type *d_params); }; // end namespace detail } // end namespace hpmc
69e61506c98cd9fb991d0a4a0231675a0fc39789.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeFacetedSphere.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeFacetedSphere template cudaError_t gpu_hpmc_free_volume<ShapeFacetedSphere>(const hpmc_free_volume_args_t &args, const typename ShapeFacetedSphere::param_type *d_params); template cudaError_t gpu_hpmc_update<ShapeFacetedSphere>(const hpmc_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeFacetedSphere>(const hpmc_implicit_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeFacetedSphere>(const hpmc_implicit_args_t& args, const typename ShapeFacetedSphere::param_type *d_params); template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeFacetedSphere>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedSphere::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeFacetedSphere>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedSphere::param_type *d_params); }; // end namespace detail } // end namespace hpmc
a2a4469a668437b87649f2996976feae104b2b4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "analysis.h" int arraySum(int* array, int count) { int sum = 0; for (int x = 0; x < count; x++) { sum += array[x]; } return sum; } __global__ void arrayPartialSum(int* partial, int* array, int count) { if (blockIdx.x * blockDim.x + threadIdx.x >= count) return; int* local_array = array + (blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumUnrolled2(int* partial, int* array, int count) { int idx = (2 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + blockDim.x < count) { array[idx] += array[idx + blockDim.x]; } __syncthreads(); int* local_array = array + (2 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumUnrolled8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } 
__global__ void arrayPartialSumUnrolledWarp8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumCompleteUnrolledWarp8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); if (blockDim.x >= 1024 && threadIdx.x < 512) { local_array[threadIdx.x] += local_array[threadIdx.x + 512]; } if (blockDim.x >= 512 && threadIdx.x < 256) { local_array[threadIdx.x] += local_array[threadIdx.x + 256]; } if (blockDim.x >= 256 && threadIdx.x < 
128) { local_array[threadIdx.x] += local_array[threadIdx.x + 128]; } if (blockDim.x >= 128 && threadIdx.x < 64) { local_array[threadIdx.x] += local_array[threadIdx.x + 64]; } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } template <unsigned int blockSize> __global__ void arrayPartialSumTemplateCompleteUnrolledWarp8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); if (blockSize >= 1024 && threadIdx.x < 512) { local_array[threadIdx.x] += local_array[threadIdx.x + 512]; } if (blockSize >= 512 && threadIdx.x < 256) { local_array[threadIdx.x] += local_array[threadIdx.x + 256]; } if (blockSize >= 256 && threadIdx.x < 128) { local_array[threadIdx.x] += local_array[threadIdx.x + 128]; } if (blockSize >= 128 && threadIdx.x < 64) { local_array[threadIdx.x] += local_array[threadIdx.x + 64]; } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += 
v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } int callArrayPartialSumKernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); hipLaunchKernelGGL(( arrayPartialSum), dim3(grid),dim3(block), 0, 0, device_partial, device_array, count); hipDeviceSynchronize(); Analysis::end(1); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); return sum; } int callArrayPartialSumUnrolled2Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); hipLaunchKernelGGL(( arrayPartialSumUnrolled2), dim3(grid.x / 2),dim3(block), 0, 0, device_partial, device_array, count); hipDeviceSynchronize(); Analysis::end(2); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); return sum; } int callArrayPartialSumUnrolled8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); 
hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); hipLaunchKernelGGL(( arrayPartialSumUnrolled8), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); hipDeviceSynchronize(); Analysis::end(3); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); return sum; } int callArrayPartialSumUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); hipLaunchKernelGGL(( arrayPartialSumUnrolledWarp8), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); hipDeviceSynchronize(); Analysis::end(4); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); return sum; } int callArrayPartialSumCompleteUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); hipLaunchKernelGGL(( 
arrayPartialSumCompleteUnrolledWarp8), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); hipDeviceSynchronize(); Analysis::end(5); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); return sum; } int callArrayPartialSumTemplateCompleteUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; hipMalloc((int**)&device_array, count*sizeof(int)); hipMemcpy(device_array, host_array, count*sizeof(int), hipMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; hipMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); switch (block.x) { case 1024: hipLaunchKernelGGL(( arrayPartialSumTemplateCompleteUnrolledWarp8<1024>), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); break; case 512: hipLaunchKernelGGL(( arrayPartialSumTemplateCompleteUnrolledWarp8<512>), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); break; case 256: hipLaunchKernelGGL(( arrayPartialSumTemplateCompleteUnrolledWarp8<256>), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); break; case 128: hipLaunchKernelGGL(( arrayPartialSumTemplateCompleteUnrolledWarp8<128>), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); break; case 64: hipLaunchKernelGGL(( arrayPartialSumTemplateCompleteUnrolledWarp8<64>), dim3(grid.x / 8),dim3(block), 0, 0, device_partial, device_array, count); break; } hipDeviceSynchronize(); Analysis::end(6); hipMemcpy(host_partial, device_partial, grid.x*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); hipFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); hipDeviceReset(); 
return sum; } int main(void) { int input = 1 << 16; int* host_array = (int*)malloc(input*sizeof(int)); for (int x = 0; x < input; x++) { host_array[x] = x; } Analysis::setAbsoluteStart(); Analysis::createLabel(0, "arraySum (CPU)"); Analysis::createLabel(1, "arrayPartialSum"); Analysis::createLabel(2, "arrayPartialSumUnrolled2"); Analysis::createLabel(3, "arrayPartialSumUnrolled8"); Analysis::createLabel(4, "arrayPartialSumUnrolledWarp8Kernel"); Analysis::createLabel(5, "arrayPartialSumCompleteUnrolledWarp8Kernel"); Analysis::createLabel(6, "arrayPartialSumTemplateCompleteUnrolledWarp8Kernel"); hipDeviceReset(); Analysis::begin(); printf("\n%-60s %d\n", "arraySum (CPU):", arraySum(host_array, input)); Analysis::end(0); printf("%-60s %d\n", "arrayPartialSum:", callArrayPartialSumKernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolled2:", callArrayPartialSumUnrolled2Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolled8:", callArrayPartialSumUnrolled8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolledWarp8Kernel:", callArrayPartialSumUnrolledWarp8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumCompleteUnrolledWarp8Kernel:", callArrayPartialSumCompleteUnrolledWarp8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumTemplateCompleteUnrolledWarp8Kernel:", callArrayPartialSumTemplateCompleteUnrolledWarp8Kernel(host_array, input)); Analysis::printAll(); return 0; }
a2a4469a668437b87649f2996976feae104b2b4f.cu
#include <stdio.h> #include "analysis.h" int arraySum(int* array, int count) { int sum = 0; for (int x = 0; x < count; x++) { sum += array[x]; } return sum; } __global__ void arrayPartialSum(int* partial, int* array, int count) { if (blockIdx.x * blockDim.x + threadIdx.x >= count) return; int* local_array = array + (blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumUnrolled2(int* partial, int* array, int count) { int idx = (2 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + blockDim.x < count) { array[idx] += array[idx + blockDim.x]; } __syncthreads(); int* local_array = array + (2 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumUnrolled8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumUnrolledWarp8(int* partial, int* array, int count) { int idx = 
(8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (threadIdx.x < stride) { local_array[threadIdx.x] += local_array[threadIdx.x + stride]; } __syncthreads(); } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } __global__ void arrayPartialSumCompleteUnrolledWarp8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); if (blockDim.x >= 1024 && threadIdx.x < 512) { local_array[threadIdx.x] += local_array[threadIdx.x + 512]; } if (blockDim.x >= 512 && threadIdx.x < 256) { local_array[threadIdx.x] += local_array[threadIdx.x + 256]; } if (blockDim.x >= 256 && threadIdx.x < 128) { local_array[threadIdx.x] += local_array[threadIdx.x + 128]; } if (blockDim.x >= 128 && 
threadIdx.x < 64) { local_array[threadIdx.x] += local_array[threadIdx.x + 64]; } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } template <unsigned int blockSize> __global__ void arrayPartialSumTemplateCompleteUnrolledWarp8(int* partial, int* array, int count) { int idx = (8 * blockIdx.x * blockDim.x) + threadIdx.x; if (idx + (7 * blockDim.x) < count) { array[idx] += array[idx + blockDim.x]; array[idx] += array[idx + (2 * blockDim.x)]; array[idx] += array[idx + (3 * blockDim.x)]; array[idx] += array[idx + (4 * blockDim.x)]; array[idx] += array[idx + (5 * blockDim.x)]; array[idx] += array[idx + (6 * blockDim.x)]; array[idx] += array[idx + (7 * blockDim.x)]; } __syncthreads(); int* local_array = array + (8 * blockIdx.x * blockDim.x); if (blockSize >= 1024 && threadIdx.x < 512) { local_array[threadIdx.x] += local_array[threadIdx.x + 512]; } if (blockSize >= 512 && threadIdx.x < 256) { local_array[threadIdx.x] += local_array[threadIdx.x + 256]; } if (blockSize >= 256 && threadIdx.x < 128) { local_array[threadIdx.x] += local_array[threadIdx.x + 128]; } if (blockSize >= 128 && threadIdx.x < 64) { local_array[threadIdx.x] += local_array[threadIdx.x + 64]; } if (threadIdx.x < 32) { volatile int* v_array = local_array; v_array[threadIdx.x] += v_array[threadIdx.x + 32]; v_array[threadIdx.x] += v_array[threadIdx.x + 16]; v_array[threadIdx.x] += v_array[threadIdx.x + 8]; v_array[threadIdx.x] += v_array[threadIdx.x + 4]; v_array[threadIdx.x] += v_array[threadIdx.x + 2]; v_array[threadIdx.x] += v_array[threadIdx.x + 1]; } if (threadIdx.x == 0) { partial[blockIdx.x] = local_array[0]; } } int 
callArrayPartialSumKernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); arrayPartialSum<<<grid,block>>>(device_partial, device_array, count); cudaDeviceSynchronize(); Analysis::end(1); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int callArrayPartialSumUnrolled2Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); arrayPartialSumUnrolled2<<<grid.x / 2,block>>>(device_partial, device_array, count); cudaDeviceSynchronize(); Analysis::end(2); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int callArrayPartialSumUnrolled8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; 
cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); arrayPartialSumUnrolled8<<<grid.x / 8,block>>>(device_partial, device_array, count); cudaDeviceSynchronize(); Analysis::end(3); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int callArrayPartialSumUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); arrayPartialSumUnrolledWarp8<<<grid.x / 8,block>>>(device_partial, device_array, count); cudaDeviceSynchronize(); Analysis::end(4); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int callArrayPartialSumCompleteUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); arrayPartialSumCompleteUnrolledWarp8<<<grid.x / 8,block>>>(device_partial, device_array, count); cudaDeviceSynchronize(); Analysis::end(5); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); 
cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int callArrayPartialSumTemplateCompleteUnrolledWarp8Kernel(int* host_array, int count) { dim3 block = (64); dim3 grid = ((count + block.x - 1) / block.x); int* device_array; cudaMalloc((int**)&device_array, count*sizeof(int)); cudaMemcpy(device_array, host_array, count*sizeof(int), cudaMemcpyHostToDevice); int* host_partial = (int*)malloc(grid.x*sizeof(int)); int* device_partial; cudaMalloc((int**)&device_partial, grid.x*sizeof(int)); Analysis::begin(); switch (block.x) { case 1024: arrayPartialSumTemplateCompleteUnrolledWarp8<1024><<<grid.x / 8,block>>>(device_partial, device_array, count); break; case 512: arrayPartialSumTemplateCompleteUnrolledWarp8<512><<<grid.x / 8,block>>>(device_partial, device_array, count); break; case 256: arrayPartialSumTemplateCompleteUnrolledWarp8<256><<<grid.x / 8,block>>>(device_partial, device_array, count); break; case 128: arrayPartialSumTemplateCompleteUnrolledWarp8<128><<<grid.x / 8,block>>>(device_partial, device_array, count); break; case 64: arrayPartialSumTemplateCompleteUnrolledWarp8<64><<<grid.x / 8,block>>>(device_partial, device_array, count); break; } cudaDeviceSynchronize(); Analysis::end(6); cudaMemcpy(host_partial, device_partial, grid.x*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); cudaFree(device_partial); int sum = 0; for (int x = 0; x < grid.x; x++) { sum += host_partial[x]; } free(host_partial); cudaDeviceReset(); return sum; } int main(void) { int input = 1 << 16; int* host_array = (int*)malloc(input*sizeof(int)); for (int x = 0; x < input; x++) { host_array[x] = x; } Analysis::setAbsoluteStart(); Analysis::createLabel(0, "arraySum (CPU)"); Analysis::createLabel(1, "arrayPartialSum"); Analysis::createLabel(2, "arrayPartialSumUnrolled2"); Analysis::createLabel(3, "arrayPartialSumUnrolled8"); Analysis::createLabel(4, 
"arrayPartialSumUnrolledWarp8Kernel"); Analysis::createLabel(5, "arrayPartialSumCompleteUnrolledWarp8Kernel"); Analysis::createLabel(6, "arrayPartialSumTemplateCompleteUnrolledWarp8Kernel"); cudaDeviceReset(); Analysis::begin(); printf("\n%-60s %d\n", "arraySum (CPU):", arraySum(host_array, input)); Analysis::end(0); printf("%-60s %d\n", "arrayPartialSum:", callArrayPartialSumKernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolled2:", callArrayPartialSumUnrolled2Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolled8:", callArrayPartialSumUnrolled8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumUnrolledWarp8Kernel:", callArrayPartialSumUnrolledWarp8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumCompleteUnrolledWarp8Kernel:", callArrayPartialSumCompleteUnrolledWarp8Kernel(host_array, input)); printf("%-60s %d\n", "arrayPartialSumTemplateCompleteUnrolledWarp8Kernel:", callArrayPartialSumTemplateCompleteUnrolledWarp8Kernel(host_array, input)); Analysis::printAll(); return 0; }
05f950e36020f2663d679dafa6fcf353e2827892.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ========================================================================== // $Id$ // ========================================================================== // (C)opyright: 2009 // // Ulm University // // Creator: Hendrik Lensch, Holger Dammertz // Email: hendrik.lensch@uni-ulm.de, holger.dammertz@uni-ulm.de // ========================================================================== // $Log$ // ========================================================================== #include <stdio.h> #include <algorithm> #include <hip/hip_vector_types.h> #include <sys/time.h> using namespace std; #define MAX_BLOCKS 256 #define MAX_THREADS 256 #define maxThreadsPerBlock 256 inline __int64_t continuousTimeNs() { timespec now; clock_gettime(CLOCK_REALTIME, &now); __int64_t result = (__int64_t ) now.tv_sec * 1000000000 + (__int64_t ) now.tv_nsec; return result; } __global__ void dotProdKernel(float *dst, const float* a1, const float* a2, int dim) { // Number of the current thread unsigned int threadNo = blockDim.x * blockIdx.x + threadIdx.x; // Number of all threads unsigned int threadSize = gridDim.x * blockDim.x; // Sum up every (threadSize)th element starting at the threads index and ending before dim float result = 0.0f; for (unsigned int t = threadNo; t < dim; t += threadSize) result += a1[t] * a2[t]; // Write the result to dst[threadIdx] if it can contain something dst[threadNo] = result; } // !!! missing !!! // Kernel for reducing gridDim.x*blockDim.x elements to gridDim.x elements /* This program sets up two large arrays of size dim and computes the dot product of both arrays. Most of the code of previous exercises is reused. Mode 0 of the program computes the final dot product as before. Mode 1: After computing the dot product and storing the result for all MAX_BLOCKS * MAX_THREAD threads, this time, the reduction of the sum is to be computed on the GPU. 
Write a reduction sum kernel which is called log(n) times. The number of total threads will be divided by nThreads(iter-1) in each iteration. Inside the kernel, the problem will be reduced by a factor of 2 in each step. */ __global__ void reduceSumKernel(float *d_out, const float* d_in) { extern __shared__ float sdata[]; // Number of the current thread int myID = threadIdx.x + blockIdx.x * blockDim.x; int tid =threadIdx.x; sdata[tid] = d_in[myID]; __syncthreads(); for(int stride =blockDim.x/2; stride>=1; stride >>=1){ if (tid<stride){ sdata[tid] += sdata[tid+stride]; } __syncthreads(); } if(tid==0){ d_out[blockIdx.x] = sdata[0]; } } int main(int argc, char* argv[]) { // parse command line int acount = 1; if (argc < 3) { printf( "usage: testDotProductStreams <dim> <reduction mode [gold:0, CPU:1, GPU:2]>\n"); exit(1); } // number of elements in both vectors int dim = atoi(argv[acount++]); int mode = atoi(argv[acount++]); printf("dim: %d\n", dim); // Allocate only pagelocked memory for simplicity float* cpuArray1; float* cpuArray2; float* cpuResult; float* cpuTemp; hipHostMalloc((void**) &cpuTemp, sizeof(float)); hipHostMalloc((void**) &cpuArray1, dim * sizeof(float)); hipHostMalloc((void**) &cpuArray2, dim * sizeof(float)); hipHostMalloc((void**) &cpuResult, MAX_THREADS * MAX_BLOCKS * sizeof(float)); // initialize the two arrays for (int i = 0; i < dim; ++i) { #ifdef RTEST cpuArray1[i] = drand48(); cpuArray2[i] = drand48(); #else cpuArray1[i] = 1.0; cpuArray2[i] = 1.; // i % 10; #endif } // Allocate GPU memory float* gpuArray1; float* gpuArray2; float* gpuResult1; // Two result arrays to be able to move data from one to the other during reduction float* gpuResult2; float* d_temp; hipMalloc((void**) &d_temp, sizeof(float)); hipMalloc((void**) &gpuArray1, dim * sizeof(float)); hipMalloc((void**) &gpuArray2, dim * sizeof(float)); hipMalloc((void**) &gpuResult1, MAX_BLOCKS * MAX_THREADS * sizeof(float)); hipMalloc((void**) &gpuResult2, MAX_BLOCKS * MAX_THREADS * 
sizeof(float)); // MAX_BLOCKS elements would be sufficient here... // Upload input data hipMemcpy(gpuArray1, cpuArray1, dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(gpuArray2, cpuArray2, dim * sizeof(float), hipMemcpyHostToDevice); // Variable for output double finalDotProduct = 0.; __int64_t startTime = continuousTimeNs(); // Iterations for benchmarking only the kernel call for (int iter = 0; iter < 1000; ++iter) { // a simplistic way of splitting the problem into threads dim3 blockGrid(MAX_BLOCKS); dim3 threadBlock(MAX_THREADS); unsigned int expectedResultSize; switch (mode) { case 0: finalDotProduct = 0.0; for (unsigned int i = 0; i < dim; i++) finalDotProduct += cpuArray1[i] * cpuArray2[i]; break; case 1: // call the dot kernel hipLaunchKernelGGL(( dotProdKernel), dim3(blockGrid), dim3(threadBlock), 0, 0, gpuResult1, gpuArray1, gpuArray2, dim); // If dim < launchedThreads, only the first dim elements will contain data expectedResultSize = min(dim, MAX_THREADS * MAX_BLOCKS); // download and combine the results of multiple threads hipMemcpy(cpuResult, gpuResult1, expectedResultSize * sizeof(float), hipMemcpyDeviceToHost); finalDotProduct = 0.; // accumulate the final result on the host for (int i = 0; i < expectedResultSize; ++i) finalDotProduct += cpuResult[i]; break; case 2: // call the dot kernel, store result in gpuResult1 hipLaunchKernelGGL(( dotProdKernel), dim3(blockGrid), dim3(threadBlock), 0, 0, gpuResult1, gpuArray1, gpuArray2, dim); // !!! missing !!! // Reduce all the dot product summands to one single value, // download it to a float and use it to set finalDotProduct. 
int blocks; blocks = (dim+ MAX_THREADS-1) / MAX_THREADS; if(blocks>1){ hipLaunchKernelGGL(( reduceSumKernel), dim3(blocks),dim3(MAX_THREADS),MAX_THREADS*sizeof(float), 0, gpuResult2,gpuResult1); hipLaunchKernelGGL(( reduceSumKernel), dim3(1), dim3(MAX_THREADS),MAX_THREADS*sizeof(float), 0, d_temp, gpuResult2); } else{ hipLaunchKernelGGL(( reduceSumKernel), dim3(1), dim3(MAX_THREADS),MAX_THREADS*sizeof(float), 0, d_temp, gpuResult1); } hipMemcpy(cpuTemp, d_temp, sizeof(float), hipMemcpyDeviceToHost); finalDotProduct = cpuTemp[0]; break; } // end switch } __int64_t endTime = continuousTimeNs(); __int64_t runTime = endTime - startTime; // Print results and timing printf("Result: %f\n", finalDotProduct); printf("Time: %f\n", (float) runTime / 1000000000.0f); // cleanup GPU memory hipFree(gpuResult1); hipFree(gpuResult2); hipFree(gpuArray2); hipFree(gpuArray1); hipFree(d_temp); // free page locked memory hipHostFree(cpuArray1); hipHostFree(cpuArray2); hipHostFree(cpuResult); hipHostFree(cpuTemp); printf("done\n"); }
05f950e36020f2663d679dafa6fcf353e2827892.cu
// ========================================================================== // $Id$ // ========================================================================== // (C)opyright: 2009 // // Ulm University // // Creator: Hendrik Lensch, Holger Dammertz // Email: hendrik.lensch@uni-ulm.de, holger.dammertz@uni-ulm.de // ========================================================================== // $Log$ // ========================================================================== #include <stdio.h> #include <algorithm> #include <vector_types.h> #include <sys/time.h> using namespace std; #define MAX_BLOCKS 256 #define MAX_THREADS 256 #define maxThreadsPerBlock 256 inline __int64_t continuousTimeNs() { timespec now; clock_gettime(CLOCK_REALTIME, &now); __int64_t result = (__int64_t ) now.tv_sec * 1000000000 + (__int64_t ) now.tv_nsec; return result; } __global__ void dotProdKernel(float *dst, const float* a1, const float* a2, int dim) { // Number of the current thread unsigned int threadNo = blockDim.x * blockIdx.x + threadIdx.x; // Number of all threads unsigned int threadSize = gridDim.x * blockDim.x; // Sum up every (threadSize)th element starting at the threads index and ending before dim float result = 0.0f; for (unsigned int t = threadNo; t < dim; t += threadSize) result += a1[t] * a2[t]; // Write the result to dst[threadIdx] if it can contain something dst[threadNo] = result; } // !!! missing !!! // Kernel for reducing gridDim.x*blockDim.x elements to gridDim.x elements /* This program sets up two large arrays of size dim and computes the dot product of both arrays. Most of the code of previous exercises is reused. Mode 0 of the program computes the final dot product as before. Mode 1: After computing the dot product and storing the result for all MAX_BLOCKS * MAX_THREAD threads, this time, the reduction of the sum is to be computed on the GPU. Write a reduction sum kernel which is called log(n) times. 
The number of total threads will be divided by nThreads(iter-1) in each iteration. Inside the kernel, the problem will be reduced by a factor of 2 in each step. */ __global__ void reduceSumKernel(float *d_out, const float* d_in) { extern __shared__ float sdata[]; // Number of the current thread int myID = threadIdx.x + blockIdx.x * blockDim.x; int tid =threadIdx.x; sdata[tid] = d_in[myID]; __syncthreads(); for(int stride =blockDim.x/2; stride>=1; stride >>=1){ if (tid<stride){ sdata[tid] += sdata[tid+stride]; } __syncthreads(); } if(tid==0){ d_out[blockIdx.x] = sdata[0]; } } int main(int argc, char* argv[]) { // parse command line int acount = 1; if (argc < 3) { printf( "usage: testDotProductStreams <dim> <reduction mode [gold:0, CPU:1, GPU:2]>\n"); exit(1); } // number of elements in both vectors int dim = atoi(argv[acount++]); int mode = atoi(argv[acount++]); printf("dim: %d\n", dim); // Allocate only pagelocked memory for simplicity float* cpuArray1; float* cpuArray2; float* cpuResult; float* cpuTemp; cudaMallocHost((void**) &cpuTemp, sizeof(float)); cudaMallocHost((void**) &cpuArray1, dim * sizeof(float)); cudaMallocHost((void**) &cpuArray2, dim * sizeof(float)); cudaMallocHost((void**) &cpuResult, MAX_THREADS * MAX_BLOCKS * sizeof(float)); // initialize the two arrays for (int i = 0; i < dim; ++i) { #ifdef RTEST cpuArray1[i] = drand48(); cpuArray2[i] = drand48(); #else cpuArray1[i] = 1.0; cpuArray2[i] = 1.; // i % 10; #endif } // Allocate GPU memory float* gpuArray1; float* gpuArray2; float* gpuResult1; // Two result arrays to be able to move data from one to the other during reduction float* gpuResult2; float* d_temp; cudaMalloc((void**) &d_temp, sizeof(float)); cudaMalloc((void**) &gpuArray1, dim * sizeof(float)); cudaMalloc((void**) &gpuArray2, dim * sizeof(float)); cudaMalloc((void**) &gpuResult1, MAX_BLOCKS * MAX_THREADS * sizeof(float)); cudaMalloc((void**) &gpuResult2, MAX_BLOCKS * MAX_THREADS * sizeof(float)); // MAX_BLOCKS elements would be sufficient 
here... // Upload input data cudaMemcpy(gpuArray1, cpuArray1, dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(gpuArray2, cpuArray2, dim * sizeof(float), cudaMemcpyHostToDevice); // Variable for output double finalDotProduct = 0.; __int64_t startTime = continuousTimeNs(); // Iterations for benchmarking only the kernel call for (int iter = 0; iter < 1000; ++iter) { // a simplistic way of splitting the problem into threads dim3 blockGrid(MAX_BLOCKS); dim3 threadBlock(MAX_THREADS); unsigned int expectedResultSize; switch (mode) { case 0: finalDotProduct = 0.0; for (unsigned int i = 0; i < dim; i++) finalDotProduct += cpuArray1[i] * cpuArray2[i]; break; case 1: // call the dot kernel dotProdKernel<<<blockGrid, threadBlock>>>(gpuResult1, gpuArray1, gpuArray2, dim); // If dim < launchedThreads, only the first dim elements will contain data expectedResultSize = min(dim, MAX_THREADS * MAX_BLOCKS); // download and combine the results of multiple threads cudaMemcpy(cpuResult, gpuResult1, expectedResultSize * sizeof(float), cudaMemcpyDeviceToHost); finalDotProduct = 0.; // accumulate the final result on the host for (int i = 0; i < expectedResultSize; ++i) finalDotProduct += cpuResult[i]; break; case 2: // call the dot kernel, store result in gpuResult1 dotProdKernel<<<blockGrid, threadBlock>>>(gpuResult1, gpuArray1, gpuArray2, dim); // !!! missing !!! // Reduce all the dot product summands to one single value, // download it to a float and use it to set finalDotProduct. 
int blocks; blocks = (dim+ MAX_THREADS-1) / MAX_THREADS; if(blocks>1){ reduceSumKernel<<<blocks,MAX_THREADS,MAX_THREADS*sizeof(float)>>>(gpuResult2,gpuResult1); reduceSumKernel<<<1, MAX_THREADS,MAX_THREADS*sizeof(float)>>>(d_temp, gpuResult2); } else{ reduceSumKernel<<<1, MAX_THREADS,MAX_THREADS*sizeof(float)>>>(d_temp, gpuResult1); } cudaMemcpy(cpuTemp, d_temp, sizeof(float), cudaMemcpyDeviceToHost); finalDotProduct = cpuTemp[0]; break; } // end switch } __int64_t endTime = continuousTimeNs(); __int64_t runTime = endTime - startTime; // Print results and timing printf("Result: %f\n", finalDotProduct); printf("Time: %f\n", (float) runTime / 1000000000.0f); // cleanup GPU memory cudaFree(gpuResult1); cudaFree(gpuResult2); cudaFree(gpuArray2); cudaFree(gpuArray1); cudaFree(d_temp); // free page locked memory cudaFreeHost(cpuArray1); cudaFreeHost(cpuArray2); cudaFreeHost(cpuResult); cudaFreeHost(cpuTemp); printf("done\n"); }
d5445cb42853b78d4389c850b6f4222d3c51ce6f.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaFlow.h" #include <opencv2/optflow.hpp> int CudaFlow::initializeCorrelation(int kernelSize, int maxSearchWidth, int maxSearchHeight) { this->corrKernelSize = kernelSize; this->corrMaxSearchHeight = maxSearchHeight; this->corrMaxSearchWidth = maxSearchWidth; this->corrStride = iAlignUp(maxSearchWidth); checkCudaErrors(hipMalloc(&d_icorr032f, dataSize32f)); checkCudaErrors(hipMalloc(&d_icorr132f, dataSize32f)); checkCudaErrors(hipMalloc(&d_corrKernel, corrKernelSize * corrKernelSize * sizeof(float))); checkCudaErrors(hipMalloc(&d_corrSearchSpace, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float))); checkCudaErrors(hipMalloc(&d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float))); checkCudaErrors(hipMalloc(&d_ucorr, dataSize32f)); checkCudaErrors(hipMalloc(&d_vcorr, dataSize32f)); checkCudaErrors(hipMalloc(&d_uvrgbcorr, dataSize32fc3)); checkCudaErrors(hipMalloc(&d_corrSparseMask, dataSize32f)); checkCudaErrors(hipMalloc(&d_derivMask, dataSize32f)); return 0; } int CudaFlow::solveCorrPatchMatch(const char *picName, const char *flowName) { //this->_solveCorrPatchMatch(); this->_solveCorrPatchMatch_cpu(picName, flowName); return 0; } int CudaFlow::solveCorrPatchMatch() { //this->_solveCorrPatchMatch(); this->_solveCorrPatchMatch_cpu("new", "new"); return 0; } int CudaFlow::_solveCorrPatchMatch_cpu(const char *picName, const char *flowName) { //bind textures rgbToGray(d_i08uc3, d_icorr032f, width, height, stride); rgbToGray(d_i18uc3, d_icorr132f, width, height, stride); CorrelationBindTextures(d_icorr032f, d_icorr132f, width, height, stride); cv::Mat u = cv::Mat::zeros(cv::Size(stride, height), CV_32F); cv::Mat v = cv::Mat::zeros(cv::Size(stride, height), CV_32F); cv::Mat sMask = cv::Mat::zeros(cv::Size(stride, height), CV_32F); //solve derivative to accept only high derivative kernels cv::Mat derivMask = cv::Mat(cv::Size(stride, height), CV_32F); 
ComputeDerivMask(d_icorr032f, width, height, stride, d_derivMask, 0.001f); checkCudaErrors(hipMemcpy((float*)derivMask.ptr(), d_derivMask, dataSize32f, hipMemcpyDeviceToHost)); //int total = 0; for (int j = 0; j < height; j += 1) { for (int i = 0; i < width; i += 1) { //5x5 correlation CorrelationKernelSampling(i, j, d_corrKernel, width, height); CorrelationSearchSampling(i, j, d_corrSearchSpace); Correlation(d_corrKernel, d_corrSearchSpace, d_corrOutput); ///TODO: get maximum value and save the u, v //CPU Version cv::Mat corrResult = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(hipMemcpy(corrResult.ptr(), d_corrOutput, corrMaxSearchHeight*corrMaxSearchWidth * sizeof(float), hipMemcpyDeviceToHost)); double minVal, maxVal; cv::Point minLoc, maxLoc; cv::minMaxLoc(corrResult, &minVal, &maxVal, &minLoc, &maxLoc); //cv::Scalar mean, stdev; //cv::meanStdDev(corrResult, mean, stdev); //std::cout << minLoc << " " << minVal << std::endl; //std::cout << stdev << std::endl; //if ((mean[0] - minVal) > stdev[0]/2) { if (derivMask.at<float>(j, i) == 1.0f) { u.at<float>(j, i) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j, i) = minLoc.y - (corrMaxSearchHeight / 2); sMask.at<float>(j, i) = minVal; } //} //std::cout << u.at<float>(j, i) << "," << v.at<float>(j, i) << " " << minVal << std::endl; //para kita lahat /*u.at<float>(j-1, i-1) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j-1, i-1) = minLoc.y - (corrMaxSearchHeight / 2); u.at<float>(j, i-1) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j, i-1) = minLoc.y - (corrMaxSearchHeight / 2); u.at<float>(j-1, i) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j-1, i) = minLoc.y - (corrMaxSearchHeight / 2);*/ } } //std::cout << total << std::endl; //cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); //checkCudaErrors(hipMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), hipMemcpyDeviceToHost)); 
//cv::imshow("uv", sMask); //cv::waitKey(); checkCudaErrors(hipMemcpy(d_ucorr, (float*)u.ptr(), dataSize32f, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_vcorr, (float*)v.ptr(), dataSize32f, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_corrSparseMask, (float*)sMask.ptr(), dataSize32f, hipMemcpyHostToDevice)); FlowToHSV(d_ucorr, d_vcorr, width, height, stride, d_uvrgbcorr, 50.0); cv::Mat uvrgb = cv::Mat(cv::Size(stride, height), CV_32FC3); checkCudaErrors(hipMemcpy((float3*)uvrgb.ptr(), d_uvrgbcorr, dataSize32fc3, hipMemcpyDeviceToHost)); //cv::imshow(windowName, uvrgb); //cv::waitKey(); std::cout << picName << std::endl; cv::Mat uvrgb8uc3; uvrgb = uvrgb * 256; uvrgb.convertTo(uvrgb8uc3, CV_8UC3); cv::imwrite(picName, uvrgb8uc3); std::vector<cv::Mat> channelForward; channelForward.push_back(u); channelForward.push_back(v); cv::Mat forward; cv::merge(channelForward, forward); cv::optflow::writeOpticalFlow(flowName, forward); return 0; } int CudaFlow::_solveCorrPatchMatch() { //bind textures rgbToGray(d_i08uc3, d_icorr032f, width, height, stride); rgbToGray(d_i18uc3, d_icorr132f, width, height, stride); CorrelationBindTextures(d_icorr032f, d_icorr132f, width, height, stride); //int total = 0; for (int j = 7; j < height; j += 16) { for (int i = 5; i < width; i += 12) { //5x5 correlation CorrelationKernelSampling(i, j, d_corrKernel, width, height); CorrelationSearchSampling(i, j, d_corrSearchSpace); Correlation(d_corrKernel, d_corrSearchSpace, d_corrOutput); ///TODO: get maximum value and save the u, v //CPU Version cv::Mat corrResult = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(hipMemcpy(corrResult.ptr(), d_corrOutput, corrMaxSearchHeight*corrMaxSearchWidth * sizeof(float), hipMemcpyDeviceToHost)); double minVal, maxVal; cv::Point minLoc, maxLoc; cv::minMaxLoc(corrResult, &minVal, &maxVal, &minLoc, &maxLoc); //std::cout << minLoc << " " << minVal << std::endl; //GPUMAT version test //cv::cuda::GpuMat 
corrResult_gpu(corrMaxSearchWidth, corrMaxSearchHeight, CV_32F, d_corrOutput); //cv::minMaxLoc(corrResult_gpu, &minVal, &maxVal, &minLoc, &maxLoc); --> not implemented in opencv ///1x1 correlation -->>TODO: CREATE A SEPARATE FLOAT ARRAY IN CPU FOR THIS //float kernel = 1; //GetValue(d_icorr032f, j*stride + i, kernel); //CorrelationSearchSampling(i, j, d_corrSearchSpace); //Correlation1x1(kernel, d_corrSearchSpace, d_corrOutput); //total++; /*cv::Mat kernel = cv::Mat(cv::Size(corrKernelSize, corrKernelSize), CV_32F); checkCudaErrors(hipMemcpy(kernel.ptr(), d_corrKernel, corrKernelSize * corrKernelSize * sizeof(float), hipMemcpyDeviceToHost)); cv::Mat kernelUp; cv::resize(kernel, kernelUp, cv::Size(corrKernelSize, corrKernelSize)); cv::imshow("kernel", kernelUp); cv::Mat ss = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(hipMemcpy(ss.ptr(), d_corrSearchSpace, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), hipMemcpyDeviceToHost)); cv::imshow("ss", ss); cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(hipMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), hipMemcpyDeviceToHost)); cv::imshow("output", output); cv::waitKey();*/ } } //std::cout << total << std::endl; cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(hipMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), hipMemcpyDeviceToHost)); //cv::imshow("output", output); //for each 5x5 kernel in im0 //create kernel //create search space in im1 (texture sampling) //perform correlation //get max value and save the coordinate to ld(u,v) return 0; } int CudaFlow::solveOpticalFlowLdof() { return this->_solveOpticalFlowLdof(); } int CudaFlow::_solveOpticalFlowLdof() { // Convert RGB to Gray if (inputType == CV_8UC3) { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, 
pI1[0], width, height, stride); } else if ((inputType == CV_8U) || (inputType == CV_8UC1)) { Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (inputType == CV_16U) { Cv16uToGray(d_i016u, pI0[0], width, height, stride); Cv16uToGray(d_i116u, pI1[0], width, height, stride); } else if (inputType == CV_32F) { Cv32fToGray(d_i032f, pI0[0], width, height, stride); Cv32fToGray(d_i132f, pI1[0], width, height, stride); } else { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } if (method == METHOD_TVCHARBGRAD) { ComputeDeriv(pI0[0], width, height, stride, pIx0[0], pIy0[0]); ComputeDeriv(pI1[0], width, height, stride, pIx1[0], pIy1[0]); } // construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); if (method == METHOD_TVCHARBGRAD) { Downscale(pIx0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIx0[level]); Downscale(pIx1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIx1[level]); Downscale(pIy0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIy0[level]); Downscale(pIy1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIy1[level]); } } // solve flow checkCudaErrors(hipMemset(d_u, 0, dataSize)); checkCudaErrors(hipMemset(d_v, 0, dataSize)); for (int level = nLevels - 1; level >= 0; level--) { for (int warpIter = 0; warpIter < nWarpIters; warpIter++) { //std::cout << level << std::endl; //initialize zeros checkCudaErrors(hipMemset(d_du, 0, dataSize)); checkCudaErrors(hipMemset(d_dv, 0, dataSize)); checkCudaErrors(hipMemset(d_dus, 0, dataSize)); 
checkCudaErrors(hipMemset(d_dvs, 0, dataSize)); checkCudaErrors(hipMemset(d_dumed, 0, dataSize)); checkCudaErrors(hipMemset(d_dvmed, 0, dataSize)); checkCudaErrors(hipMemset(d_dumeds, 0, dataSize)); checkCudaErrors(hipMemset(d_dvmeds, 0, dataSize)); checkCudaErrors(hipMemset(d_pu1, 0, dataSize)); checkCudaErrors(hipMemset(d_pu2, 0, dataSize)); checkCudaErrors(hipMemset(d_pv1, 0, dataSize)); checkCudaErrors(hipMemset(d_pv2, 0, dataSize)); //warp frame 1 WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp); if (method == METHOD_TVCHARBGRAD) { WarpImage(pIx1[level], pW[level], pH[level], pS[level], d_u, d_v, d_ix1warp); WarpImage(pIy1[level], pW[level], pH[level], pS[level], d_u, d_v, d_iy1warp); } //compute derivatives if (method == METHOD_TVL1PATCH) { ComputeDerivativesPatch(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); } else { ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); } if (method == METHOD_TVCHARBGRAD) { ComputeDerivatives(pIx0[level], d_ix1warp, pW[level], pH[level], pS[level], d_Ixx, d_Ixy, d_Ixz); ComputeDerivatives(pIy0[level], d_iy1warp, pW[level], pH[level], pS[level], d_Iyx, d_Iyy, d_Iyz); } //inner iteration for (int iter = 0; iter < nSolverIters; ++iter) { if (method == METHOD_TVCHARBGRAD) { SolveDataCharbForTVGrad(d_du, d_dv, d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, d_Ixx, d_Ixy, d_Ixz, d_Iyx, d_Iyy, d_Iyz, pW[level], pH[level], pS[level], lambda, lambdagrad, theta, d_dus, d_dvs, d_dumeds, d_dvmeds); Swap(d_du, d_dus); Swap(d_dv, d_dvs); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else if (method == METHOD_TVCHARB) { SolveDataCharbForTV(d_du, d_dv, d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dus, d_dvs, d_dumeds, d_dvmeds); Swap(d_du, d_dus); Swap(d_dv, d_dvs); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else if ((method == METHOD_TVL1) || (method 
== SCENEFLOW_KINECT_TVL1)) { SolveDataL1(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else { SolveDataL1(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } SolveSmoothDualTVGlobal(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, pW[level], pH[level], pS[level], tau, theta, d_pu1s, d_pu2s, d_pv1s, d_pv2s); Swap(d_pu1, d_pu1s); Swap(d_pu2, d_pu2s); Swap(d_pv1, d_pv1s); Swap(d_pv2, d_pv2s); //*********************************** } // one median filtering MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level], d_dumeds, d_dvmeds, 5); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); // update u, v Add(d_u, d_dumed, pH[level] * pS[level], d_u); Add(d_v, d_dvmed, pH[level] * pS[level], d_v); } //upscale if (level > 0) { // scale uv //float scale = (float)pW[level + 1] / (float)pW[level]; float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); //float scaleY = (float)pH[level + 1] / (float)pH[level]; Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs); Swap(d_u, d_us); Swap(d_v, d_vs); } } if (withVisualization) { FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); } //FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); //SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow); //std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl; return 0; return 0; } int CudaFlow::copyCorrPatchMatchToHost(cv::Mat &u, cv::Mat &v, cv::Mat &uvrgb) { return 0; }
d5445cb42853b78d4389c850b6f4222d3c51ce6f.cu
#include "CudaFlow.h" #include <opencv2/optflow.hpp> int CudaFlow::initializeCorrelation(int kernelSize, int maxSearchWidth, int maxSearchHeight) { this->corrKernelSize = kernelSize; this->corrMaxSearchHeight = maxSearchHeight; this->corrMaxSearchWidth = maxSearchWidth; this->corrStride = iAlignUp(maxSearchWidth); checkCudaErrors(cudaMalloc(&d_icorr032f, dataSize32f)); checkCudaErrors(cudaMalloc(&d_icorr132f, dataSize32f)); checkCudaErrors(cudaMalloc(&d_corrKernel, corrKernelSize * corrKernelSize * sizeof(float))); checkCudaErrors(cudaMalloc(&d_corrSearchSpace, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float))); checkCudaErrors(cudaMalloc(&d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float))); checkCudaErrors(cudaMalloc(&d_ucorr, dataSize32f)); checkCudaErrors(cudaMalloc(&d_vcorr, dataSize32f)); checkCudaErrors(cudaMalloc(&d_uvrgbcorr, dataSize32fc3)); checkCudaErrors(cudaMalloc(&d_corrSparseMask, dataSize32f)); checkCudaErrors(cudaMalloc(&d_derivMask, dataSize32f)); return 0; } int CudaFlow::solveCorrPatchMatch(const char *picName, const char *flowName) { //this->_solveCorrPatchMatch(); this->_solveCorrPatchMatch_cpu(picName, flowName); return 0; } int CudaFlow::solveCorrPatchMatch() { //this->_solveCorrPatchMatch(); this->_solveCorrPatchMatch_cpu("new", "new"); return 0; } int CudaFlow::_solveCorrPatchMatch_cpu(const char *picName, const char *flowName) { //bind textures rgbToGray(d_i08uc3, d_icorr032f, width, height, stride); rgbToGray(d_i18uc3, d_icorr132f, width, height, stride); CorrelationBindTextures(d_icorr032f, d_icorr132f, width, height, stride); cv::Mat u = cv::Mat::zeros(cv::Size(stride, height), CV_32F); cv::Mat v = cv::Mat::zeros(cv::Size(stride, height), CV_32F); cv::Mat sMask = cv::Mat::zeros(cv::Size(stride, height), CV_32F); //solve derivative to accept only high derivative kernels cv::Mat derivMask = cv::Mat(cv::Size(stride, height), CV_32F); ComputeDerivMask(d_icorr032f, width, height, stride, d_derivMask, 0.001f); 
checkCudaErrors(cudaMemcpy((float*)derivMask.ptr(), d_derivMask, dataSize32f, cudaMemcpyDeviceToHost)); //int total = 0; for (int j = 0; j < height; j += 1) { for (int i = 0; i < width; i += 1) { //5x5 correlation CorrelationKernelSampling(i, j, d_corrKernel, width, height); CorrelationSearchSampling(i, j, d_corrSearchSpace); Correlation(d_corrKernel, d_corrSearchSpace, d_corrOutput); ///TODO: get maximum value and save the u, v //CPU Version cv::Mat corrResult = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(cudaMemcpy(corrResult.ptr(), d_corrOutput, corrMaxSearchHeight*corrMaxSearchWidth * sizeof(float), cudaMemcpyDeviceToHost)); double minVal, maxVal; cv::Point minLoc, maxLoc; cv::minMaxLoc(corrResult, &minVal, &maxVal, &minLoc, &maxLoc); //cv::Scalar mean, stdev; //cv::meanStdDev(corrResult, mean, stdev); //std::cout << minLoc << " " << minVal << std::endl; //std::cout << stdev << std::endl; //if ((mean[0] - minVal) > stdev[0]/2) { if (derivMask.at<float>(j, i) == 1.0f) { u.at<float>(j, i) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j, i) = minLoc.y - (corrMaxSearchHeight / 2); sMask.at<float>(j, i) = minVal; } //} //std::cout << u.at<float>(j, i) << "," << v.at<float>(j, i) << " " << minVal << std::endl; //para kita lahat /*u.at<float>(j-1, i-1) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j-1, i-1) = minLoc.y - (corrMaxSearchHeight / 2); u.at<float>(j, i-1) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j, i-1) = minLoc.y - (corrMaxSearchHeight / 2); u.at<float>(j-1, i) = minLoc.x - (corrMaxSearchWidth / 2); v.at<float>(j-1, i) = minLoc.y - (corrMaxSearchHeight / 2);*/ } } //std::cout << total << std::endl; //cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); //checkCudaErrors(cudaMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), cudaMemcpyDeviceToHost)); //cv::imshow("uv", sMask); //cv::waitKey(); 
checkCudaErrors(cudaMemcpy(d_ucorr, (float*)u.ptr(), dataSize32f, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_vcorr, (float*)v.ptr(), dataSize32f, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_corrSparseMask, (float*)sMask.ptr(), dataSize32f, cudaMemcpyHostToDevice)); FlowToHSV(d_ucorr, d_vcorr, width, height, stride, d_uvrgbcorr, 50.0); cv::Mat uvrgb = cv::Mat(cv::Size(stride, height), CV_32FC3); checkCudaErrors(cudaMemcpy((float3*)uvrgb.ptr(), d_uvrgbcorr, dataSize32fc3, cudaMemcpyDeviceToHost)); //cv::imshow(windowName, uvrgb); //cv::waitKey(); std::cout << picName << std::endl; cv::Mat uvrgb8uc3; uvrgb = uvrgb * 256; uvrgb.convertTo(uvrgb8uc3, CV_8UC3); cv::imwrite(picName, uvrgb8uc3); std::vector<cv::Mat> channelForward; channelForward.push_back(u); channelForward.push_back(v); cv::Mat forward; cv::merge(channelForward, forward); cv::optflow::writeOpticalFlow(flowName, forward); return 0; } int CudaFlow::_solveCorrPatchMatch() { //bind textures rgbToGray(d_i08uc3, d_icorr032f, width, height, stride); rgbToGray(d_i18uc3, d_icorr132f, width, height, stride); CorrelationBindTextures(d_icorr032f, d_icorr132f, width, height, stride); //int total = 0; for (int j = 7; j < height; j += 16) { for (int i = 5; i < width; i += 12) { //5x5 correlation CorrelationKernelSampling(i, j, d_corrKernel, width, height); CorrelationSearchSampling(i, j, d_corrSearchSpace); Correlation(d_corrKernel, d_corrSearchSpace, d_corrOutput); ///TODO: get maximum value and save the u, v //CPU Version cv::Mat corrResult = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(cudaMemcpy(corrResult.ptr(), d_corrOutput, corrMaxSearchHeight*corrMaxSearchWidth * sizeof(float), cudaMemcpyDeviceToHost)); double minVal, maxVal; cv::Point minLoc, maxLoc; cv::minMaxLoc(corrResult, &minVal, &maxVal, &minLoc, &maxLoc); //std::cout << minLoc << " " << minVal << std::endl; //GPUMAT version test //cv::cuda::GpuMat corrResult_gpu(corrMaxSearchWidth, 
corrMaxSearchHeight, CV_32F, d_corrOutput); //cv::minMaxLoc(corrResult_gpu, &minVal, &maxVal, &minLoc, &maxLoc); --> not implemented in opencv ///1x1 correlation -->>TODO: CREATE A SEPARATE FLOAT ARRAY IN CPU FOR THIS //float kernel = 1; //GetValue(d_icorr032f, j*stride + i, kernel); //CorrelationSearchSampling(i, j, d_corrSearchSpace); //Correlation1x1(kernel, d_corrSearchSpace, d_corrOutput); //total++; /*cv::Mat kernel = cv::Mat(cv::Size(corrKernelSize, corrKernelSize), CV_32F); checkCudaErrors(cudaMemcpy(kernel.ptr(), d_corrKernel, corrKernelSize * corrKernelSize * sizeof(float), cudaMemcpyDeviceToHost)); cv::Mat kernelUp; cv::resize(kernel, kernelUp, cv::Size(corrKernelSize, corrKernelSize)); cv::imshow("kernel", kernelUp); cv::Mat ss = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(cudaMemcpy(ss.ptr(), d_corrSearchSpace, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), cudaMemcpyDeviceToHost)); cv::imshow("ss", ss); cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(cudaMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), cudaMemcpyDeviceToHost)); cv::imshow("output", output); cv::waitKey();*/ } } //std::cout << total << std::endl; cv::Mat output = cv::Mat(cv::Size(corrMaxSearchWidth, corrMaxSearchHeight), CV_32F); checkCudaErrors(cudaMemcpy(output.ptr(), d_corrOutput, corrMaxSearchWidth * corrMaxSearchHeight * sizeof(float), cudaMemcpyDeviceToHost)); //cv::imshow("output", output); //for each 5x5 kernel in im0 //create kernel //create search space in im1 (texture sampling) //perform correlation //get max value and save the coordinate to ld(u,v) return 0; } int CudaFlow::solveOpticalFlowLdof() { return this->_solveOpticalFlowLdof(); } int CudaFlow::_solveOpticalFlowLdof() { // Convert RGB to Gray if (inputType == CV_8UC3) { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); 
} else if ((inputType == CV_8U) || (inputType == CV_8UC1)) { Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (inputType == CV_16U) { Cv16uToGray(d_i016u, pI0[0], width, height, stride); Cv16uToGray(d_i116u, pI1[0], width, height, stride); } else if (inputType == CV_32F) { Cv32fToGray(d_i032f, pI0[0], width, height, stride); Cv32fToGray(d_i132f, pI1[0], width, height, stride); } else { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } if (method == METHOD_TVCHARBGRAD) { ComputeDeriv(pI0[0], width, height, stride, pIx0[0], pIy0[0]); ComputeDeriv(pI1[0], width, height, stride, pIx1[0], pIy1[0]); } // construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); if (method == METHOD_TVCHARBGRAD) { Downscale(pIx0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIx0[level]); Downscale(pIx1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIx1[level]); Downscale(pIy0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIy0[level]); Downscale(pIy1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pIy1[level]); } } // solve flow checkCudaErrors(cudaMemset(d_u, 0, dataSize)); checkCudaErrors(cudaMemset(d_v, 0, dataSize)); for (int level = nLevels - 1; level >= 0; level--) { for (int warpIter = 0; warpIter < nWarpIters; warpIter++) { //std::cout << level << std::endl; //initialize zeros checkCudaErrors(cudaMemset(d_du, 0, dataSize)); checkCudaErrors(cudaMemset(d_dv, 0, dataSize)); checkCudaErrors(cudaMemset(d_dus, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvs, 0, 
dataSize)); checkCudaErrors(cudaMemset(d_dumed, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvmed, 0, dataSize)); checkCudaErrors(cudaMemset(d_dumeds, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvmeds, 0, dataSize)); checkCudaErrors(cudaMemset(d_pu1, 0, dataSize)); checkCudaErrors(cudaMemset(d_pu2, 0, dataSize)); checkCudaErrors(cudaMemset(d_pv1, 0, dataSize)); checkCudaErrors(cudaMemset(d_pv2, 0, dataSize)); //warp frame 1 WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp); if (method == METHOD_TVCHARBGRAD) { WarpImage(pIx1[level], pW[level], pH[level], pS[level], d_u, d_v, d_ix1warp); WarpImage(pIy1[level], pW[level], pH[level], pS[level], d_u, d_v, d_iy1warp); } //compute derivatives if (method == METHOD_TVL1PATCH) { ComputeDerivativesPatch(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); } else { ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); } if (method == METHOD_TVCHARBGRAD) { ComputeDerivatives(pIx0[level], d_ix1warp, pW[level], pH[level], pS[level], d_Ixx, d_Ixy, d_Ixz); ComputeDerivatives(pIy0[level], d_iy1warp, pW[level], pH[level], pS[level], d_Iyx, d_Iyy, d_Iyz); } //inner iteration for (int iter = 0; iter < nSolverIters; ++iter) { if (method == METHOD_TVCHARBGRAD) { SolveDataCharbForTVGrad(d_du, d_dv, d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, d_Ixx, d_Ixy, d_Ixz, d_Iyx, d_Iyy, d_Iyz, pW[level], pH[level], pS[level], lambda, lambdagrad, theta, d_dus, d_dvs, d_dumeds, d_dvmeds); Swap(d_du, d_dus); Swap(d_dv, d_dvs); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else if (method == METHOD_TVCHARB) { SolveDataCharbForTV(d_du, d_dv, d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dus, d_dvs, d_dumeds, d_dvmeds); Swap(d_du, d_dus); Swap(d_dv, d_dvs); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else if ((method == METHOD_TVL1) || (method == SCENEFLOW_KINECT_TVL1)) 
{ SolveDataL1(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } else { SolveDataL1(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); } SolveSmoothDualTVGlobal(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, pW[level], pH[level], pS[level], tau, theta, d_pu1s, d_pu2s, d_pv1s, d_pv2s); Swap(d_pu1, d_pu1s); Swap(d_pu2, d_pu2s); Swap(d_pv1, d_pv1s); Swap(d_pv2, d_pv2s); //*********************************** } // one median filtering MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level], d_dumeds, d_dvmeds, 5); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); // update u, v Add(d_u, d_dumed, pH[level] * pS[level], d_u); Add(d_v, d_dvmed, pH[level] * pS[level], d_v); } //upscale if (level > 0) { // scale uv //float scale = (float)pW[level + 1] / (float)pW[level]; float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); //float scaleY = (float)pH[level + 1] / (float)pH[level]; Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs); Swap(d_u, d_us); Swap(d_v, d_vs); } } if (withVisualization) { FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); } //FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); //SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow); //std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl; return 0; return 0; } int CudaFlow::copyCorrPatchMatchToHost(cv::Mat &u, cv::Mat &v, cv::Mat &uvrgb) { return 0; }
be15521aa71bae3068cb75e5d63b1e9ddfa22ac8.hip
// !!! This is a file automatically generated by hipify!!! /* * NormalizeFilter.cu * */ #include <assert.h> #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "NormalizeFilter.cuh" /* --- for large size --- */ // width = 2560 = 5 * 2^9 = divisible by 5*128 = 5 * 2^7 // radius = 200 <= 128 * 2 = 256 // smem_size(minmax) = 2*128*(5+2*2)*2*2 = 9216 < 49152 // smem_size(uniform) = 4*128*(5+2*2)*2*2 = 18432 < 49152 constexpr int L_ROW_BLOCKDIM_X = 64; constexpr int L_ROW_BLOCKDIM_Y = 2; constexpr int L_ROW_BLOCKDIM_Z = 2; constexpr int L_ROW_RESULT_STEPS = 5; constexpr int L_ROW_HALO_STEPS = 4; // height = 2160 = 3^3 * 5 * 2^4 = divisible by 144*5 = 3^2 * 5 * 2^4 // radius = 200 <= 144 * 2 = 288 // smem_size(minmax) = 2*144*(5+2*2)*2*2 = 10368 < 49152 // smem_size(uniform) = 4*144*(5+2*2)*2*2 = 20736 < 49152 constexpr int L_COL_BLOCKDIM_X = 2; constexpr int L_COL_BLOCKDIM_Y = 72; constexpr int L_COL_BLOCKDIM_Z = 2; constexpr int L_COL_RESULT_STEPS = 5; constexpr int L_COL_HALO_STEPS = 4; // depth = 32 = 2^5 = divisible by 16*2 = 2^5 // radius = 30 < 16 * 2 = 32 // smem_size(minmax) = 2*16*(2+2*4)*8*8 = 20480 < 49152 // smem_size(uniform) = 4*16*(2+2*4)*8*8 = 40960 < 49152 constexpr int L_LAY_BLOCKDIM_X = 8; constexpr int L_LAY_BLOCKDIM_Y = 8; constexpr int L_LAY_BLOCKDIM_Z = 16; constexpr int L_LAY_RESULT_STEPS = 2; constexpr int L_LAY_HALO_STEPS = 2; constexpr int NORM_BLOCKDIM_X = 8; constexpr int NORM_BLOCKDIM_Y = 8; constexpr int NORM_BLOCKDIM_Z = 8; /* * Uniform Filter */ template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformRows3DKernel ( float *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_Z][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X]; float *smem_thread = smem[threadIdx.z][threadIdx.y]; //Offset to the left halo edge const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + 
threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Load main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (float)d_src[i * BLOCKDIM_X]; } //Load left halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X >= 0) ? (float)d_src[i * BLOCKDIM_X] : (float)d_src[-baseX]; } //Load right halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X < w) ? (float)d_src[i * BLOCKDIM_X] : (float)d_src[w-1 - baseX]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.x + i * BLOCKDIM_X - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2*kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_X] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformRows3D ( float *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_X * HALO_STEPS >= kernel_radius); assert(w % (RESULT_STEPS * BLOCKDIM_X) == 0); assert(h % BLOCKDIM_Y == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / (RESULT_STEPS * BLOCKDIM_X), h / BLOCKDIM_Y, d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( uniformRows3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS>), dim3(blocks), dim3(threads), 0, 0, d_dst, d_src, w,h,d, kernel_radius ); getLastCudaError("uniformRows3DKernel() execution failed\n"); 
//checkCudaErrors(hipDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformColumns3DKernel( float *d_dst, float *d_src, int w,int h,int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_Z][BLOCKDIM_X][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Y + 1]; float *smem_thread = smem[threadIdx.z][threadIdx.x]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = d_src[i * BLOCKDIM_Y * w]; } //Upper halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = (baseY + i * BLOCKDIM_Y >= 0) ? d_src[i * BLOCKDIM_Y * w] : d_src[-baseY*w]; } //Lower halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y]= (baseY + i * BLOCKDIM_Y < h) ? 
d_src[i * BLOCKDIM_Y * w] : d_src[(h-1 - baseY)*w]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.y + i * BLOCKDIM_Y - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2 * kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_Y * w] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformColumns3D ( float *d_dst, float *d_src, int w,int h,int d, int kernel_radius ) { assert(BLOCKDIM_Y * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % (RESULT_STEPS * BLOCKDIM_Y) == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / BLOCKDIM_X, h / (RESULT_STEPS * BLOCKDIM_Y), d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( uniformColumns3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS>), dim3(blocks), dim3(threads), 0, 0, d_dst,d_src,w,h,d,kernel_radius ); getLastCudaError("uniformColumns3DKernel() execution failed\n"); //checkCudaErrors(hipDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformLayers3DKernel( float *d_dst, float *d_src, int w, int h, int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_X][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Z + 1]; float *smem_thread = smem[threadIdx.x][threadIdx.y]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; const int pitch = w*h; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; 
i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = d_src[i * BLOCKDIM_Z * pitch]; } //Upper halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = (baseZ + i * BLOCKDIM_Z >= 0) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[-baseZ*pitch]; } //Lower halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z]= (baseZ + i * BLOCKDIM_Z < d) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[(d-1 - baseZ)*pitch]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.z + i * BLOCKDIM_Z - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2*kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_Z * pitch] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformLayers3D( float *d_dst, float *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_Z * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % BLOCKDIM_Y == 0); assert(d % (RESULT_STEPS * BLOCKDIM_Z) == 0); dim3 blocks(w / BLOCKDIM_X, h / BLOCKDIM_Y, d / (RESULT_STEPS * BLOCKDIM_Z)); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( uniformLayers3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS>), dim3(blocks), dim3(threads), 0, 0, d_dst, d_src, w,h,d,kernel_radius ); getLastCudaError("uniformLayers3DKernel() execution failed\n"); //checkCudaErrors(hipDeviceSynchronize()); } /* * MinMax(Erosion or Dilation) Filter */ template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxRows3DKernel ( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ unsigned short 
smem[BLOCKDIM_Z][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X]; unsigned short *smem_thread = smem[threadIdx.z][threadIdx.y]; //Offset to the left halo edge const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Load main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = d_src[i * BLOCKDIM_X]; } //Load left halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X >= 0) ? d_src[i * BLOCKDIM_X] : d_src[-baseX]; } //Load right halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X < w) ? 
d_src[i * BLOCKDIM_X] : d_src[w-1-baseX]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.x + i * BLOCKDIM_X - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2*kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_X] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxRows3D ( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_X * HALO_STEPS >= kernel_radius); assert(w % (RESULT_STEPS * BLOCKDIM_X) == 0); assert(h % BLOCKDIM_Y == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / (RESULT_STEPS * BLOCKDIM_X), h / BLOCKDIM_Y, d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( minmaxRows3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS, is_min>), dim3(blocks), dim3(threads), 0, 0, d_dst, d_src, w,h,d, kernel_radius ); getLastCudaError("minmaxRows3DKernel() execution failed\n"); //checkCudaErrors(hipDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxColumns3DKernel( unsigned short *d_dst, unsigned short *d_src, int w,int h,int d, int kernel_radius ) { __shared__ unsigned short smem[BLOCKDIM_Z][BLOCKDIM_X][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Y + 1]; unsigned short *smem_thread = smem[threadIdx.z][threadIdx.x]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Main data #pragma unroll for (int i = 
HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = d_src[i * BLOCKDIM_Y * w]; } //Upper halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = (baseY + i * BLOCKDIM_Y >= 0) ? d_src[i * BLOCKDIM_Y * w] : d_src[-baseY*w]; } //Lower halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y]= (baseY + i * BLOCKDIM_Y < h) ? d_src[i * BLOCKDIM_Y * w] : d_src[(h-1-baseY)*w]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.y + i * BLOCKDIM_Y - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2 * kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_Y * w] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxColumns3D ( unsigned short *d_dst, unsigned short *d_src, int w,int h,int d, int kernel_radius ) { assert(BLOCKDIM_Y * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % (RESULT_STEPS * BLOCKDIM_Y) == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / BLOCKDIM_X, h / (RESULT_STEPS * BLOCKDIM_Y), d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( minmaxColumns3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS, is_min>), dim3(blocks), dim3(threads), 0, 0, d_dst,d_src,w,h,d,kernel_radius ); getLastCudaError("minmaxColumns3DKernel() execution failed\n"); //checkCudaErrors(hipDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxLayers3DKernel( unsigned short *d_dst, unsigned 
short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ unsigned short smem[BLOCKDIM_X][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Z + 1]; unsigned short *smem_thread = smem[threadIdx.x][threadIdx.y]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; const int pitch = w*h; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = d_src[i * BLOCKDIM_Z * pitch]; } //Upper halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = (baseZ + i * BLOCKDIM_Z >= 0) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[-baseZ*w*h]; } //Lower halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z]= (baseZ + i * BLOCKDIM_Z < d) ? 
d_src[i * BLOCKDIM_Z * pitch] : d_src[(d-1-baseZ)*w*h]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.z + i * BLOCKDIM_Z - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2*kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_Z * pitch] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxLayers3D( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_Z * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % BLOCKDIM_Y == 0); assert(d % (RESULT_STEPS * BLOCKDIM_Z) == 0); dim3 blocks(w / BLOCKDIM_X, h / BLOCKDIM_Y, d / (RESULT_STEPS * BLOCKDIM_Z)); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); hipLaunchKernelGGL(( minmaxLayers3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS,is_min>), dim3(blocks), dim3(threads), 0, 0, d_dst, d_src, w,h,d,kernel_radius ); getLastCudaError("minmaxLayers3DKernel() execution failed\n"); //checkCudaErrors(hipDeviceSynchronize()); } /* * Define Functions */ void UniformLarge3DFilter ( unsigned short *d_img, float *d_temp, float *d_result, int w, int h, int d, int radius_xy, int radius_z ) { uniformRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS>(d_result, d_img, w,h,d,radius_xy); uniformColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS>(d_temp, d_result, w,h,d,radius_xy); uniformLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS>(d_result, d_temp, w,h,d,radius_z); } void ErosionLarge3DFilter ( unsigned short *d_img, unsigned short *d_temp, unsigned short *d_result, int w, int h, int d, int radius_xy, int radius_z 
) { minmaxRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS,true>(d_result, d_img, w,h,d,radius_xy); minmaxColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS,true>(d_temp, d_result, w,h,d,radius_xy); minmaxLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS,true>(d_result, d_temp, w,h,d,radius_z); } void DilationLarge3DFilter ( unsigned short *d_img, unsigned short *d_temp, unsigned short *d_result, int w, int h, int d, int radius_xy, int radius_z ) { minmaxRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS,false>(d_result, d_img, w,h,d,radius_xy); minmaxColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS,false>(d_temp, d_result, w,h,d,radius_xy); minmaxLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS,false>(d_result, d_temp, w,h,d,radius_z); } __global__ void Normalize3DKernel ( const unsigned short *d_src, const float *d_erosion, const float *d_dilation, float *d_dst, float min_intensity, const int width, const int height, const int depth ) { const int baseX = blockIdx.x * blockDim.x + threadIdx.x; const int baseY = blockIdx.y * blockDim.y + threadIdx.y; const int baseZ = blockIdx.z * blockDim.z + threadIdx.z; const int idx = (baseZ * height + baseY) * width + baseX; const float intensity = (float)d_src[idx]; d_dst[idx] = (intensity >= min_intensity) ? 
(intensity-d_erosion[idx]) / (d_dilation[idx] - d_erosion[idx]) : 0; } __global__ void Copy3DKernel ( const unsigned short *d_src, float *d_dst, float min_intensity, const int width, const int height, const int depth ) { const int baseX = blockIdx.x * blockDim.x + threadIdx.x; const int baseY = blockIdx.y * blockDim.y + threadIdx.y; const int baseZ = blockIdx.z * blockDim.z + threadIdx.z; const int idx = (baseZ * height + baseY) * width + baseX; const float intensity = (float)d_src[idx]; d_dst[idx] = (intensity >= min_intensity) ? intensity : 0; } void Normalize3DFilter ( unsigned short *d_img, float *d_norm, unsigned short *d_erosion_temp1, unsigned short *d_erosion_temp2, float *d_erosion_l, float *d_dilation_l, float min_intensity, const int width, const int height, const int depth, const int radius_large_xy, const int radius_large_z ) { if (radius_large_xy == 0 || radius_large_z == 0) { // skip normalize, just copy assert(width % (NORM_BLOCKDIM_X) == 0); assert(height % (NORM_BLOCKDIM_Y) == 0); assert(depth % (NORM_BLOCKDIM_Z) == 0); dim3 blocks(width / (NORM_BLOCKDIM_X), height/(NORM_BLOCKDIM_Y), depth / (NORM_BLOCKDIM_Z)); dim3 threads(NORM_BLOCKDIM_X, NORM_BLOCKDIM_Y, NORM_BLOCKDIM_Z); hipLaunchKernelGGL(( Copy3DKernel), dim3(blocks), dim3(threads), 0, 0, d_img, d_norm, min_intensity, width, height, depth); getLastCudaError("Error: Copy3DKernel() kernel execution FAILED!"); //checkCudaErrors(hipDeviceSynchronize()); } else { float *d_uniform_temp = d_norm; ErosionLarge3DFilter(d_img, d_erosion_temp1, d_erosion_temp2, width,height,depth, radius_large_xy,radius_large_z); UniformLarge3DFilter(d_erosion_temp2, d_uniform_temp, d_erosion_l, width,height,depth, radius_large_xy,radius_large_z); DilationLarge3DFilter(d_img, d_erosion_temp1, d_erosion_temp2, width,height,depth, radius_large_xy,radius_large_z); UniformLarge3DFilter(d_erosion_temp2, d_uniform_temp, d_dilation_l, width,height,depth, radius_large_xy, radius_large_z); assert(width % (NORM_BLOCKDIM_X) == 
0); assert(height % (NORM_BLOCKDIM_Y) == 0); assert(depth % (NORM_BLOCKDIM_Z) == 0); dim3 blocks(width / (NORM_BLOCKDIM_X), height/(NORM_BLOCKDIM_Y), depth / (NORM_BLOCKDIM_Z)); dim3 threads(NORM_BLOCKDIM_X, NORM_BLOCKDIM_Y, NORM_BLOCKDIM_Z); hipLaunchKernelGGL(( Normalize3DKernel), dim3(blocks), dim3(threads), 0, 0, d_img, d_erosion_l, d_dilation_l, d_norm, min_intensity, width, height, depth); getLastCudaError("Error: Normalize3DKernel() kernel execution FAILED!"); //checkCudaErrors(hipDeviceSynchronize()); } }
be15521aa71bae3068cb75e5d63b1e9ddfa22ac8.cu
/* * NormalizeFilter.cu * */ #include <assert.h> #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "NormalizeFilter.cuh" /* --- for large size --- */ // width = 2560 = 5 * 2^9 = divisible by 5*128 = 5 * 2^7 // radius = 200 <= 128 * 2 = 256 // smem_size(minmax) = 2*128*(5+2*2)*2*2 = 9216 < 49152 // smem_size(uniform) = 4*128*(5+2*2)*2*2 = 18432 < 49152 constexpr int L_ROW_BLOCKDIM_X = 64; constexpr int L_ROW_BLOCKDIM_Y = 2; constexpr int L_ROW_BLOCKDIM_Z = 2; constexpr int L_ROW_RESULT_STEPS = 5; constexpr int L_ROW_HALO_STEPS = 4; // height = 2160 = 3^3 * 5 * 2^4 = divisible by 144*5 = 3^2 * 5 * 2^4 // radius = 200 <= 144 * 2 = 288 // smem_size(minmax) = 2*144*(5+2*2)*2*2 = 10368 < 49152 // smem_size(uniform) = 4*144*(5+2*2)*2*2 = 20736 < 49152 constexpr int L_COL_BLOCKDIM_X = 2; constexpr int L_COL_BLOCKDIM_Y = 72; constexpr int L_COL_BLOCKDIM_Z = 2; constexpr int L_COL_RESULT_STEPS = 5; constexpr int L_COL_HALO_STEPS = 4; // depth = 32 = 2^5 = divisible by 16*2 = 2^5 // radius = 30 < 16 * 2 = 32 // smem_size(minmax) = 2*16*(2+2*4)*8*8 = 20480 < 49152 // smem_size(uniform) = 4*16*(2+2*4)*8*8 = 40960 < 49152 constexpr int L_LAY_BLOCKDIM_X = 8; constexpr int L_LAY_BLOCKDIM_Y = 8; constexpr int L_LAY_BLOCKDIM_Z = 16; constexpr int L_LAY_RESULT_STEPS = 2; constexpr int L_LAY_HALO_STEPS = 2; constexpr int NORM_BLOCKDIM_X = 8; constexpr int NORM_BLOCKDIM_Y = 8; constexpr int NORM_BLOCKDIM_Z = 8; /* * Uniform Filter */ template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformRows3DKernel ( float *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_Z][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X]; float *smem_thread = smem[threadIdx.z][threadIdx.y]; //Offset to the left halo edge const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + 
threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Load main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (float)d_src[i * BLOCKDIM_X]; } //Load left halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X >= 0) ? (float)d_src[i * BLOCKDIM_X] : (float)d_src[-baseX]; } //Load right halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X < w) ? (float)d_src[i * BLOCKDIM_X] : (float)d_src[w-1 - baseX]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.x + i * BLOCKDIM_X - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2*kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_X] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformRows3D ( float *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_X * HALO_STEPS >= kernel_radius); assert(w % (RESULT_STEPS * BLOCKDIM_X) == 0); assert(h % BLOCKDIM_Y == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / (RESULT_STEPS * BLOCKDIM_X), h / BLOCKDIM_Y, d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); uniformRows3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS><<<blocks, threads>>> ( d_dst, d_src, w,h,d, kernel_radius ); getLastCudaError("uniformRows3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int 
BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformColumns3DKernel( float *d_dst, float *d_src, int w,int h,int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_Z][BLOCKDIM_X][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Y + 1]; float *smem_thread = smem[threadIdx.z][threadIdx.x]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = d_src[i * BLOCKDIM_Y * w]; } //Upper halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = (baseY + i * BLOCKDIM_Y >= 0) ? d_src[i * BLOCKDIM_Y * w] : d_src[-baseY*w]; } //Lower halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y]= (baseY + i * BLOCKDIM_Y < h) ? 
d_src[i * BLOCKDIM_Y * w] : d_src[(h-1 - baseY)*w]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.y + i * BLOCKDIM_Y - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2 * kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_Y * w] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformColumns3D ( float *d_dst, float *d_src, int w,int h,int d, int kernel_radius ) { assert(BLOCKDIM_Y * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % (RESULT_STEPS * BLOCKDIM_Y) == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / BLOCKDIM_X, h / (RESULT_STEPS * BLOCKDIM_Y), d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); uniformColumns3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS><<<blocks, threads>>> ( d_dst,d_src,w,h,d,kernel_radius ); getLastCudaError("uniformColumns3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> __global__ void uniformLayers3DKernel( float *d_dst, float *d_src, int w, int h, int d, int kernel_radius ) { __shared__ float smem[BLOCKDIM_X][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Z + 1]; float *smem_thread = smem[threadIdx.x][threadIdx.y]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Z + threadIdx.z; const float uniform_kernel = 1.0f / (2 * kernel_radius + 1); d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; const int pitch = w*h; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.z + i * 
BLOCKDIM_Z] = d_src[i * BLOCKDIM_Z * pitch]; } //Upper halo (nearest repeat) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = (baseZ + i * BLOCKDIM_Z >= 0) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[-baseZ*pitch]; } //Lower halo (nearest repeat) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z]= (baseZ + i * BLOCKDIM_Z < d) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[(d-1 - baseZ)*pitch]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { float *smem_kern = &smem_thread[threadIdx.z + i * BLOCKDIM_Z - kernel_radius]; float val = 0; //#pragma unroll for (int j = 0; j <= 2*kernel_radius; j++) { val += smem_kern[j]; } d_dst[i * BLOCKDIM_Z * pitch] = val * uniform_kernel; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS> void uniformLayers3D( float *d_dst, float *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_Z * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % BLOCKDIM_Y == 0); assert(d % (RESULT_STEPS * BLOCKDIM_Z) == 0); dim3 blocks(w / BLOCKDIM_X, h / BLOCKDIM_Y, d / (RESULT_STEPS * BLOCKDIM_Z)); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); uniformLayers3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS><<<blocks, threads>>> ( d_dst, d_src, w,h,d,kernel_radius ); getLastCudaError("uniformLayers3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } /* * MinMax(Erosion or Dilation) Filter */ template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxRows3DKernel ( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ unsigned short smem[BLOCKDIM_Z][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X]; unsigned short 
*smem_thread = smem[threadIdx.z][threadIdx.y]; //Offset to the left halo edge const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Load main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = d_src[i * BLOCKDIM_X]; } //Load left halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X >= 0) ? d_src[i * BLOCKDIM_X] : d_src[-baseX]; } //Load right halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.x + i * BLOCKDIM_X] = (baseX + i * BLOCKDIM_X < w) ? d_src[i * BLOCKDIM_X] : d_src[w-1-baseX]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.x + i * BLOCKDIM_X - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2*kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_X] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxRows3D ( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_X * HALO_STEPS >= kernel_radius); assert(w % (RESULT_STEPS * BLOCKDIM_X) == 0); assert(h % BLOCKDIM_Y == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / (RESULT_STEPS * BLOCKDIM_X), h / BLOCKDIM_Y, d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); 
minmaxRows3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS, is_min><<<blocks, threads>>> ( d_dst, d_src, w,h,d, kernel_radius ); getLastCudaError("minmaxRows3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxColumns3DKernel( unsigned short *d_dst, unsigned short *d_src, int w,int h,int d, int kernel_radius ) { __shared__ unsigned short smem[BLOCKDIM_Z][BLOCKDIM_X][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Y + 1]; unsigned short *smem_thread = smem[threadIdx.z][threadIdx.x]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; //Main data #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = d_src[i * BLOCKDIM_Y * w]; } //Upper halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y] = (baseY + i * BLOCKDIM_Y >= 0) ? d_src[i * BLOCKDIM_Y * w] : d_src[-baseY*w]; } //Lower halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.y + i * BLOCKDIM_Y]= (baseY + i * BLOCKDIM_Y < h) ? 
d_src[i * BLOCKDIM_Y * w] : d_src[(h-1-baseY)*w]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.y + i * BLOCKDIM_Y - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2 * kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_Y * w] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxColumns3D ( unsigned short *d_dst, unsigned short *d_src, int w,int h,int d, int kernel_radius ) { assert(BLOCKDIM_Y * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % (RESULT_STEPS * BLOCKDIM_Y) == 0); assert(d % BLOCKDIM_Z == 0); dim3 blocks(w / BLOCKDIM_X, h / (RESULT_STEPS * BLOCKDIM_Y), d / BLOCKDIM_Z); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); minmaxColumns3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS, is_min><<<blocks, threads>>> ( d_dst,d_src,w,h,d,kernel_radius ); getLastCudaError("minmaxColumns3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> __global__ void minmaxLayers3DKernel( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { __shared__ unsigned short smem[BLOCKDIM_X][BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Z + 1]; unsigned short *smem_thread = smem[threadIdx.x][threadIdx.y]; //Offset to the upper halo edge const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y; const int baseZ = (blockIdx.z * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Z + threadIdx.z; d_src += (baseZ * h + baseY) * w + baseX; d_dst += (baseZ * h + baseY) * w + baseX; const int pitch = w*h; //Main data #pragma unroll for 
(int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = d_src[i * BLOCKDIM_Z * pitch]; } //Upper halo (nearest constant border) #pragma unroll for (int i = 0; i < HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z] = (baseZ + i * BLOCKDIM_Z >= 0) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[-baseZ*w*h]; } //Lower halo (nearest constant border) #pragma unroll for (int i = HALO_STEPS + RESULT_STEPS; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++) { smem_thread[threadIdx.z + i * BLOCKDIM_Z]= (baseZ + i * BLOCKDIM_Z < d) ? d_src[i * BLOCKDIM_Z * pitch] : d_src[(d-1-baseZ)*w*h]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) { unsigned short *smem_kern = &smem_thread[threadIdx.z + i * BLOCKDIM_Z - kernel_radius]; unsigned short val = smem_kern[0]; //#pragma unroll for (int j = 1; j <= 2*kernel_radius; j++) { if(is_min) val = min(val, smem_kern[j]); else val = max(val, smem_kern[j]); } d_dst[i * BLOCKDIM_Z * pitch] = val; } } template<int BLOCKDIM_X, int BLOCKDIM_Y, int BLOCKDIM_Z, int RESULT_STEPS, int HALO_STEPS, bool is_min> void minmaxLayers3D( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius ) { assert(BLOCKDIM_Z * HALO_STEPS >= kernel_radius); assert(w % BLOCKDIM_X == 0); assert(h % BLOCKDIM_Y == 0); assert(d % (RESULT_STEPS * BLOCKDIM_Z) == 0); dim3 blocks(w / BLOCKDIM_X, h / BLOCKDIM_Y, d / (RESULT_STEPS * BLOCKDIM_Z)); dim3 threads(BLOCKDIM_X, BLOCKDIM_Y, BLOCKDIM_Z); minmaxLayers3DKernel<BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z,RESULT_STEPS,HALO_STEPS,is_min><<<blocks, threads>>> ( d_dst, d_src, w,h,d,kernel_radius ); getLastCudaError("minmaxLayers3DKernel() execution failed\n"); //checkCudaErrors(cudaDeviceSynchronize()); } /* * Define Functions */ void UniformLarge3DFilter ( unsigned short *d_img, float *d_temp, float *d_result, int w, int h, int d, int radius_xy, int radius_z ) { 
uniformRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS>(d_result, d_img, w,h,d,radius_xy); uniformColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS>(d_temp, d_result, w,h,d,radius_xy); uniformLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS>(d_result, d_temp, w,h,d,radius_z); } void ErosionLarge3DFilter ( unsigned short *d_img, unsigned short *d_temp, unsigned short *d_result, int w, int h, int d, int radius_xy, int radius_z ) { minmaxRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS,true>(d_result, d_img, w,h,d,radius_xy); minmaxColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS,true>(d_temp, d_result, w,h,d,radius_xy); minmaxLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS,true>(d_result, d_temp, w,h,d,radius_z); } void DilationLarge3DFilter ( unsigned short *d_img, unsigned short *d_temp, unsigned short *d_result, int w, int h, int d, int radius_xy, int radius_z ) { minmaxRows3D<L_ROW_BLOCKDIM_X,L_ROW_BLOCKDIM_Y,L_ROW_BLOCKDIM_Z,L_ROW_RESULT_STEPS,L_ROW_HALO_STEPS,false>(d_result, d_img, w,h,d,radius_xy); minmaxColumns3D<L_COL_BLOCKDIM_X,L_COL_BLOCKDIM_Y,L_COL_BLOCKDIM_Z,L_COL_RESULT_STEPS,L_COL_HALO_STEPS,false>(d_temp, d_result, w,h,d,radius_xy); minmaxLayers3D<L_LAY_BLOCKDIM_X,L_LAY_BLOCKDIM_Y,L_LAY_BLOCKDIM_Z,L_LAY_RESULT_STEPS,L_LAY_HALO_STEPS,false>(d_result, d_temp, w,h,d,radius_z); } __global__ void Normalize3DKernel ( const unsigned short *d_src, const float *d_erosion, const float *d_dilation, float *d_dst, float min_intensity, const int width, const int height, const int depth ) { const int baseX = blockIdx.x * blockDim.x + threadIdx.x; const int baseY = blockIdx.y * blockDim.y + threadIdx.y; const int baseZ = blockIdx.z * blockDim.z + threadIdx.z; const int idx = (baseZ * height 
+ baseY) * width + baseX; const float intensity = (float)d_src[idx]; d_dst[idx] = (intensity >= min_intensity) ? (intensity-d_erosion[idx]) / (d_dilation[idx] - d_erosion[idx]) : 0; } __global__ void Copy3DKernel ( const unsigned short *d_src, float *d_dst, float min_intensity, const int width, const int height, const int depth ) { const int baseX = blockIdx.x * blockDim.x + threadIdx.x; const int baseY = blockIdx.y * blockDim.y + threadIdx.y; const int baseZ = blockIdx.z * blockDim.z + threadIdx.z; const int idx = (baseZ * height + baseY) * width + baseX; const float intensity = (float)d_src[idx]; d_dst[idx] = (intensity >= min_intensity) ? intensity : 0; } void Normalize3DFilter ( unsigned short *d_img, float *d_norm, unsigned short *d_erosion_temp1, unsigned short *d_erosion_temp2, float *d_erosion_l, float *d_dilation_l, float min_intensity, const int width, const int height, const int depth, const int radius_large_xy, const int radius_large_z ) { if (radius_large_xy == 0 || radius_large_z == 0) { // skip normalize, just copy assert(width % (NORM_BLOCKDIM_X) == 0); assert(height % (NORM_BLOCKDIM_Y) == 0); assert(depth % (NORM_BLOCKDIM_Z) == 0); dim3 blocks(width / (NORM_BLOCKDIM_X), height/(NORM_BLOCKDIM_Y), depth / (NORM_BLOCKDIM_Z)); dim3 threads(NORM_BLOCKDIM_X, NORM_BLOCKDIM_Y, NORM_BLOCKDIM_Z); Copy3DKernel<<<blocks, threads>>>(d_img, d_norm, min_intensity, width, height, depth); getLastCudaError("Error: Copy3DKernel() kernel execution FAILED!"); //checkCudaErrors(cudaDeviceSynchronize()); } else { float *d_uniform_temp = d_norm; ErosionLarge3DFilter(d_img, d_erosion_temp1, d_erosion_temp2, width,height,depth, radius_large_xy,radius_large_z); UniformLarge3DFilter(d_erosion_temp2, d_uniform_temp, d_erosion_l, width,height,depth, radius_large_xy,radius_large_z); DilationLarge3DFilter(d_img, d_erosion_temp1, d_erosion_temp2, width,height,depth, radius_large_xy,radius_large_z); UniformLarge3DFilter(d_erosion_temp2, d_uniform_temp, d_dilation_l, 
width,height,depth, radius_large_xy, radius_large_z); assert(width % (NORM_BLOCKDIM_X) == 0); assert(height % (NORM_BLOCKDIM_Y) == 0); assert(depth % (NORM_BLOCKDIM_Z) == 0); dim3 blocks(width / (NORM_BLOCKDIM_X), height/(NORM_BLOCKDIM_Y), depth / (NORM_BLOCKDIM_Z)); dim3 threads(NORM_BLOCKDIM_X, NORM_BLOCKDIM_Y, NORM_BLOCKDIM_Z); Normalize3DKernel<<<blocks, threads>>>(d_img, d_erosion_l, d_dilation_l, d_norm, min_intensity, width, height, depth); getLastCudaError("Error: Normalize3DKernel() kernel execution FAILED!"); //checkCudaErrors(cudaDeviceSynchronize()); } }
08c8ea5bfeeeabc447b663657107c2f23091cb85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * workperform.cu * generate vector * execute dot product for two vectors * reference: "Optimizing Parallel Reduction in CUDA" by Mark Harris */ #include <stdio.h> #include <stdlib.h> #include <cstdlib> template <unsigned int block_Size> __device__ void warpReduce(volatile unsigned long long *vec, unsigned int tid) { if (block_Size >= 64) vec[tid] += vec[tid + 32]; if (block_Size >= 32) vec[tid] += vec[tid + 16]; if (block_Size >= 16) vec[tid] += vec[tid + 8]; if (block_Size >= 8) vec[tid] += vec[tid + 4]; if (block_Size >= 4) vec[tid] += vec[tid + 2]; if (block_Size >= 2) vec[tid] += vec[tid + 1]; } // The __global__ directive identifies this function as a kernel // Note: all kernels must be declared with return type void template <unsigned int block_Size> __global__ void cu_vector_dot (unsigned long long *output_d, unsigned long vecSize) { // genrate vector for F dot produt D __shared__ unsigned long long vec[block_Size]; unsigned int tid = threadIdx.x; unsigned int bid = blockIdx.x; unsigned long gid = bid*block_Size+tid; //~ unsigned long gid = bid*block_Size*2+tid; //~ unsigned int gridSize = block_Size*2*gridDim.x; vec[tid] = 0; //~ while (gid < vecSize) { //~ if(gid< vecSize/2 && (gid+block_Size)<vecSize/2){ //~ vec[tid] += (gid+1)*(gid%10+1)+(gid+block_Size+1)*((gid+block_Size)%10+1); //~ } //~ else if(gid< vecSize/2 && (gid+block_Size)>vecSize/2){ //~ vec[tid] += (gid+1)*(gid%10+1)+(gid+block_Size+2*(vecSize/2-(gid+block_Size)))*((gid+block_Size)%10+1); //~ } //~ else{ //~ vec[tid] += (gid+2*(vecSize/2-gid))*(gid%10+1)+(gid+block_Size+2*(vecSize/2-(gid+block_Size)))*((gid+block_Size)%10+1); //~ } //~ gid += gridSize; //~ } if(gid< vecSize/2){ vec[tid] = (gid+1)*(gid%10+1); } else{ vec[tid] = (gid+2*(vecSize/2-gid))*(gid%10+1); } __syncthreads(); //unroll the iterative if (block_Size >= 512) { if (tid < 256) { vec[tid] += vec[tid + 256]; } __syncthreads(); } if 
(block_Size >= 256) { if (tid < 128) { vec[tid] += vec[tid + 128]; } __syncthreads(); } if (block_Size >= 128) { if (tid < 64) { vec[tid] += vec[tid + 64]; } __syncthreads(); } // when tid<=32, it's in a warp, we don't need syncthreads, it's sequence if (tid < 32) { warpReduce<block_Size>(vec, tid); } if (tid == 0) output_d[bid] = vec[0]; } // This function is called from the host computer. // It manages memory and calls the function that is executed on the GPU void innerproduct(unsigned long long *output, unsigned long vecSize, int blockSize) { // block_d and thread_d are the GPU counterparts of the arrays that exists in host memory unsigned long long *output_d; int blockNum = vecSize/blockSize; hipError_t result; // allocate space in the device result = hipMalloc ((void**) &output_d, sizeof(unsigned long long) * blockNum); if (result != hipSuccess) { fprintf(stderr, "hipMalloc (block) failed."); exit(1); } //copy the arrays from host to the device result = hipMemcpy (output_d, output, sizeof(unsigned long long) * blockNum, hipMemcpyHostToDevice); if (result != hipSuccess) { fprintf(stderr, "hipMemcpy host->dev (block) failed."); exit(1); } // set execution configuration dim3 dimBlock (blockSize); dim3 dimGrid (blockNum); // actual computation: Call the kernel switch (blockSize) { case 512: hipLaunchKernelGGL(( cu_vector_dot<512>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 256: hipLaunchKernelGGL(( cu_vector_dot<256>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 128: hipLaunchKernelGGL(( cu_vector_dot<128>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 64: hipLaunchKernelGGL(( cu_vector_dot< 64>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 32: hipLaunchKernelGGL(( cu_vector_dot< 32>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 16: hipLaunchKernelGGL(( cu_vector_dot< 16>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); 
break; case 8: hipLaunchKernelGGL(( cu_vector_dot< 8>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 4: hipLaunchKernelGGL(( cu_vector_dot< 4>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 2: hipLaunchKernelGGL(( cu_vector_dot< 2>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; case 1: hipLaunchKernelGGL(( cu_vector_dot< 1>), dim3(dimGrid), dim3(dimBlock) , 0, 0, output_d, vecSize); break; } // transfer results back to host result = hipMemcpy (output, output_d, sizeof(unsigned long long) * blockNum, hipMemcpyDeviceToHost); if (result != hipSuccess) { fprintf(stderr, "hipMemcpy host <- dev (block) failed."); exit(1); } // release the memory on the GPU result = hipFree (output_d); if (result != hipSuccess) { fprintf(stderr, "hipFree (block) failed."); exit(1); } }
08c8ea5bfeeeabc447b663657107c2f23091cb85.cu
/* * workperform.cu * generate vector * execute dot product for two vectors * reference: "Optimizing Parallel Reduction in CUDA" by Mark Harris */ #include <stdio.h> #include <stdlib.h> #include <cstdlib> template <unsigned int block_Size> __device__ void warpReduce(volatile unsigned long long *vec, unsigned int tid) { if (block_Size >= 64) vec[tid] += vec[tid + 32]; if (block_Size >= 32) vec[tid] += vec[tid + 16]; if (block_Size >= 16) vec[tid] += vec[tid + 8]; if (block_Size >= 8) vec[tid] += vec[tid + 4]; if (block_Size >= 4) vec[tid] += vec[tid + 2]; if (block_Size >= 2) vec[tid] += vec[tid + 1]; } // The __global__ directive identifies this function as a kernel // Note: all kernels must be declared with return type void template <unsigned int block_Size> __global__ void cu_vector_dot (unsigned long long *output_d, unsigned long vecSize) { // genrate vector for F dot produt D __shared__ unsigned long long vec[block_Size]; unsigned int tid = threadIdx.x; unsigned int bid = blockIdx.x; unsigned long gid = bid*block_Size+tid; //~ unsigned long gid = bid*block_Size*2+tid; //~ unsigned int gridSize = block_Size*2*gridDim.x; vec[tid] = 0; //~ while (gid < vecSize) { //~ if(gid< vecSize/2 && (gid+block_Size)<vecSize/2){ //~ vec[tid] += (gid+1)*(gid%10+1)+(gid+block_Size+1)*((gid+block_Size)%10+1); //~ } //~ else if(gid< vecSize/2 && (gid+block_Size)>vecSize/2){ //~ vec[tid] += (gid+1)*(gid%10+1)+(gid+block_Size+2*(vecSize/2-(gid+block_Size)))*((gid+block_Size)%10+1); //~ } //~ else{ //~ vec[tid] += (gid+2*(vecSize/2-gid))*(gid%10+1)+(gid+block_Size+2*(vecSize/2-(gid+block_Size)))*((gid+block_Size)%10+1); //~ } //~ gid += gridSize; //~ } if(gid< vecSize/2){ vec[tid] = (gid+1)*(gid%10+1); } else{ vec[tid] = (gid+2*(vecSize/2-gid))*(gid%10+1); } __syncthreads(); //unroll the iterative if (block_Size >= 512) { if (tid < 256) { vec[tid] += vec[tid + 256]; } __syncthreads(); } if (block_Size >= 256) { if (tid < 128) { vec[tid] += vec[tid + 128]; } __syncthreads(); } if 
(block_Size >= 128) { if (tid < 64) { vec[tid] += vec[tid + 64]; } __syncthreads(); } // when tid<=32, it's in a warp, we don't need syncthreads, it's sequence if (tid < 32) { warpReduce<block_Size>(vec, tid); } if (tid == 0) output_d[bid] = vec[0]; } // This function is called from the host computer. // It manages memory and calls the function that is executed on the GPU void innerproduct(unsigned long long *output, unsigned long vecSize, int blockSize) { // block_d and thread_d are the GPU counterparts of the arrays that exists in host memory unsigned long long *output_d; int blockNum = vecSize/blockSize; cudaError_t result; // allocate space in the device result = cudaMalloc ((void**) &output_d, sizeof(unsigned long long) * blockNum); if (result != cudaSuccess) { fprintf(stderr, "cudaMalloc (block) failed."); exit(1); } //copy the arrays from host to the device result = cudaMemcpy (output_d, output, sizeof(unsigned long long) * blockNum, cudaMemcpyHostToDevice); if (result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host->dev (block) failed."); exit(1); } // set execution configuration dim3 dimBlock (blockSize); dim3 dimGrid (blockNum); // actual computation: Call the kernel switch (blockSize) { case 512: cu_vector_dot<512><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 256: cu_vector_dot<256><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 128: cu_vector_dot<128><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 64: cu_vector_dot< 64><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 32: cu_vector_dot< 32><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 16: cu_vector_dot< 16><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 8: cu_vector_dot< 8><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 4: cu_vector_dot< 4><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 2: cu_vector_dot< 2><<< dimGrid, dimBlock >>>(output_d, vecSize); break; case 1: cu_vector_dot< 1><<< dimGrid, dimBlock >>>(output_d, 
vecSize); break; } // transfer results back to host result = cudaMemcpy (output, output_d, sizeof(unsigned long long) * blockNum, cudaMemcpyDeviceToHost); if (result != cudaSuccess) { fprintf(stderr, "cudaMemcpy host <- dev (block) failed."); exit(1); } // release the memory on the GPU result = cudaFree (output_d); if (result != cudaSuccess) { fprintf(stderr, "cudaFree (block) failed."); exit(1); } }
9ee70c4d937aacb759fb3e75c1eba91773ab864b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kLessThanEqScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float val = 1; float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kLessThanEqScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kLessThanEqScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kLessThanEqScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9ee70c4d937aacb759fb3e75c1eba91773ab864b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kLessThanEqScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float val = 1; float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kLessThanEqScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kLessThanEqScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kLessThanEqScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
beaf7d749d636f8da45baa4200077c14d2cb0603.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <hipcub/hipcub.hpp> #include "cuda_utils.h" #include "random/rng.h" #include "stats/mean.h" #include "stats/stddev.h" #include "test_utils.h" namespace MLCommon { namespace Random { enum RandomType { RNG_Normal, RNG_LogNormal, RNG_Uniform, RNG_Gumbel, RNG_Logistic, RNG_Exp, RNG_Rayleigh, RNG_Laplace }; template <typename T, int TPB> __global__ void meanKernel(T *out, const T *data, int len) { typedef hipcub::BlockReduce<T, TPB> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x + blockIdx.x * blockDim.x; T val = tid < len ? 
data[tid] : T(0); T x = BlockReduce(temp_storage).Sum(val); __syncthreads(); T xx = BlockReduce(temp_storage).Sum(val * val); __syncthreads(); if (threadIdx.x == 0) { myAtomicAdd(out, x); myAtomicAdd(out + 1, xx); } } template <typename T> struct RngInputs { T tolerance; int len; // start, end: for uniform // mean, sigma: for normal/lognormal // mean, beta: for gumbel // mean, scale: for logistic and laplace // lambda: for exponential // sigma: for rayleigh T start, end; RandomType type; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const RngInputs<T> &dims) { return os; } #include <time.h> #include <sys/timeb.h> template <typename T> class RngTest : public ::testing::TestWithParam<RngInputs<T>> { protected: void SetUp() override { // Tests are configured with their expected test-values sigma. For example, // 4 x sigma indicates the test shouldn't fail 99.9% of the time. num_sigma = 10; params = ::testing::TestWithParam<RngInputs<T>>::GetParam(); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); Rng r(params.seed, params.gtype); allocate(data, params.len); allocate(stats, 2, true); switch (params.type) { case RNG_Normal: r.normal(data, params.len, params.start, params.end, stream); break; case RNG_LogNormal: r.lognormal(data, params.len, params.start, params.end, stream); break; case RNG_Uniform: r.uniform(data, params.len, params.start, params.end, stream); break; case RNG_Gumbel: r.gumbel(data, params.len, params.start, params.end, stream); break; case RNG_Logistic: r.logistic(data, params.len, params.start, params.end, stream); break; case RNG_Exp: r.exponential(data, params.len, params.start, stream); break; case RNG_Rayleigh: r.rayleigh(data, params.len, params.start, stream); break; case RNG_Laplace: r.laplace(data, params.len, params.start, params.end, stream); break; }; static const int threads = 128; hipLaunchKernelGGL(( meanKernel<T, threads>), dim3(ceildiv(params.len, 
threads)), dim3(threads), 0, stream, stats, data, params.len); updateHost<T>(h_stats, stats, 2, stream); CUDA_CHECK(hipStreamSynchronize(stream)); h_stats[0] /= params.len; h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(stats)); } void getExpectedMeanVar(T meanvar[2]) { switch (params.type) { case RNG_Normal: meanvar[0] = params.start; meanvar[1] = params.end * params.end; break; case RNG_LogNormal: { auto var = params.end * params.end; auto mu = params.start; meanvar[0] = myExp(mu + var * T(0.5)); meanvar[1] = (myExp(var) - T(1.0)) * myExp(T(2.0) * mu + var); break; } case RNG_Uniform: meanvar[0] = (params.start + params.end) * T(0.5); meanvar[1] = params.end - params.start; meanvar[1] = meanvar[1] * meanvar[1] / T(12.0); break; case RNG_Gumbel: { auto gamma = T(0.577215664901532); meanvar[0] = params.start + params.end * gamma; meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(6.0); break; } case RNG_Logistic: meanvar[0] = params.start; meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(3.0); break; case RNG_Exp: meanvar[0] = T(1.0) / params.start; meanvar[1] = meanvar[0] * meanvar[0]; break; case RNG_Rayleigh: meanvar[0] = params.start * mySqrt(T(3.1415 / 2.0)); meanvar[1] = ((T(4.0) - T(3.1415)) / T(2.0)) * params.start * params.start; break; case RNG_Laplace: meanvar[0] = params.start; meanvar[1] = T(2.0) * params.end * params.end; break; }; } protected: RngInputs<T> params; T *data, *stats; T h_stats[2]; // mean, var int num_sigma; }; // The measured mean and standard deviation for each tested distribution are, // of course, statistical variables. Thus setting an appropriate testing // tolerance essentially requires one to set a probability of test failure. We // choose to set this at 3-4 x sigma, i.e., a 99.7-99.9% confidence interval so that // the test will indeed pass. 
In quick experiments (using the identical // distributions given by NumPy/SciPy), the measured standard deviation is the // variable with the greatest variance and so we determined the variance for // each distribution and number of samples (32*1024 or 8*1024). Below // are listed the standard deviation for these tests. // Distribution: StdDev 32*1024, StdDev 8*1024 // Normal: 0.0055, 0.011 // LogNormal: 0.05, 0.1 // Uniform: 0.003, 0.005 // Gumbel: 0.005, 0.01 // Logistic: 0.005, 0.01 // Exp: 0.008, 0.015 // Rayleigh: 0.0125, 0.025 // Laplace: 0.02, 0.04 // We generally want 4 x sigma >= 99.9% chance of success typedef RngTest<float> RngTestF; const std::vector<RngInputs<float>> inputsf = { {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenPhilox, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenPhilox, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenPhilox, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenPhilox, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenPhilox, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenPhilox, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenPhilox, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenPhilox, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenPhilox, 1234ULL}, {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenTaps, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenTaps, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenTaps, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenTaps, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, 
GenTaps, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenTaps, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenTaps, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenTaps, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenTaps, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenTaps, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenTaps, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenTaps, 1234ULL}, {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenKiss99, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenKiss99, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenKiss99, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenKiss99, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenKiss99, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenKiss99, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenKiss99, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenKiss99, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenKiss99, 1234ULL}}; TEST_P(RngTestF, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(num_sigma*params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(num_sigma*params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, 
RngTestF, ::testing::ValuesIn(inputsf)); typedef RngTest<double> RngTestD; const std::vector<RngInputs<double>> inputsd = { {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenPhilox, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenPhilox, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenPhilox, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenPhilox, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenPhilox, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenPhilox, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenPhilox, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.02 , 32 * 1024, 1.0, 1.0, RNG_Laplace, GenPhilox, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenPhilox, 1234ULL}, {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenTaps, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenTaps, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenTaps, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenTaps, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenTaps, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenTaps, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenTaps, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenTaps, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenTaps, 1234ULL}, {0.02 , 32 
* 1024, 1.0, 1.0, RNG_Laplace, GenTaps, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenTaps, 1234ULL}, {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenKiss99, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenKiss99, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenKiss99, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenKiss99, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenKiss99, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenKiss99, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenKiss99, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.02 , 32 * 1024, 1.0, 1.0, RNG_Laplace, GenKiss99, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenKiss99, 1234ULL}}; TEST_P(RngTestD, Result) { double meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<double>(num_sigma*params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<double>(num_sigma*params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestD, ::testing::ValuesIn(inputsd)); // ---------------------------------------------------------------------- // // Test for expected variance in mean calculations template <typename T> T quick_mean(const std::vector<T>& d) { T acc = T(0); for(const auto& di : d) { acc += di; } return acc/d.size(); } template <typename T> T quick_std(const std::vector<T>& d) { T acc = T(0); T d_mean = quick_mean(d); for(const auto& di : d) { acc += ((di - d_mean)*(di - d_mean)); } return std::sqrt(acc/(d.size()-1)); } template <typename T> std::ostream& operator<< (std::ostream& 
out, const std::vector<T>& v) { if ( !v.empty() ) { out << '['; std::copy (v.begin(), v.end(), std::ostream_iterator<T>(out, ", ")); out << "\b\b]"; } return out; } // The following tests the 3 random number generators by checking that the // measured mean error is close to the well-known analytical result // (sigma/sqrt(n_samples)). To compute the mean error, we a number of // experiments computing the mean, giving us a distribution of the mean // itself. The mean error is simply the standard deviation of this // distribution (the standard deviation of the mean). TEST(Rng, MeanError) { timeb time_struct; ftime(&time_struct); int seed = time_struct.millitm; int num_samples = 1024; int num_experiments = 1024; float* data; float* mean_result; float* std_result; int len = num_samples*num_experiments; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, len); allocate(mean_result, num_experiments); allocate(std_result, num_experiments); for(auto rtype : {Random::GenPhilox, Random::GenKiss99 /*, Random::GenTaps */}) { Random::Rng r(seed, rtype); r.normal(data, len, 3.3f, 0.23f, stream); // r.uniform(data, len, -1.0, 2.0); Stats::mean(mean_result, data, num_samples, num_experiments, false, false, stream); Stats::stddev(std_result, data, mean_result, num_samples, num_experiments, false, false, stream); std::vector<float> h_mean_result(num_experiments); std::vector<float> h_std_result(num_experiments); updateHost(h_mean_result.data(), mean_result, num_experiments, stream); updateHost(h_std_result.data(), std_result, num_experiments, stream); CUDA_CHECK(hipStreamSynchronize(stream)); auto d_mean = quick_mean(h_mean_result); // std-dev of mean; also known as mean error auto d_std_of_mean = quick_std(h_mean_result); auto d_std = quick_mean(h_std_result); auto d_std_of_mean_analytical = d_std/std::sqrt(num_samples); // std::cout << "measured mean error: " << d_std_of_mean << "\n"; // std::cout << "expected mean error: " << d_std/std::sqrt(num_samples) << 
"\n"; auto diff_expected_vs_measured_mean_error = std::abs(d_std_of_mean - d_std/std::sqrt(num_samples)); ASSERT_TRUE((diff_expected_vs_measured_mean_error/d_std_of_mean_analytical < 0.5)); } CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(mean_result)); CUDA_CHECK(hipFree(std_result)); // std::cout << "mean_res:" << h_mean_result << "\n"; } template<typename T, int len, int scale> class ScaledBernoulliTest : public ::testing::Test { protected: void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); Rng r(42); allocate(data, len * sizeof(T), stream); r.scaled_bernoulli(data, len, T(0.5), T(scale), stream); } void TearDown() override { CUDA_CHECK(hipFree(data)); } void rangeCheck() { T* h_data = new T[len]; updateHost(h_data, data, len, stream); ASSERT_TRUE(std::none_of(h_data, h_data + len, [](const T& a) { return a < -scale || a > scale; })); delete[] h_data; } T* data; hipStream_t stream; }; typedef ScaledBernoulliTest<float, 500, 35> ScaledBernoulliTest1; TEST_F(ScaledBernoulliTest1, RangeCheck) { rangeCheck(); } typedef ScaledBernoulliTest<double, 100, 220> ScaledBernoulliTest2; TEST_F(ScaledBernoulliTest2, RangeCheck) { rangeCheck(); } } // end namespace Random } // end namespace MLCommon
beaf7d749d636f8da45baa4200077c14d2cb0603.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cub/cub.cuh> #include "cuda_utils.h" #include "random/rng.h" #include "stats/mean.h" #include "stats/stddev.h" #include "test_utils.h" namespace MLCommon { namespace Random { enum RandomType { RNG_Normal, RNG_LogNormal, RNG_Uniform, RNG_Gumbel, RNG_Logistic, RNG_Exp, RNG_Rayleigh, RNG_Laplace }; template <typename T, int TPB> __global__ void meanKernel(T *out, const T *data, int len) { typedef cub::BlockReduce<T, TPB> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x + blockIdx.x * blockDim.x; T val = tid < len ? 
data[tid] : T(0); T x = BlockReduce(temp_storage).Sum(val); __syncthreads(); T xx = BlockReduce(temp_storage).Sum(val * val); __syncthreads(); if (threadIdx.x == 0) { myAtomicAdd(out, x); myAtomicAdd(out + 1, xx); } } template <typename T> struct RngInputs { T tolerance; int len; // start, end: for uniform // mean, sigma: for normal/lognormal // mean, beta: for gumbel // mean, scale: for logistic and laplace // lambda: for exponential // sigma: for rayleigh T start, end; RandomType type; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const RngInputs<T> &dims) { return os; } #include <time.h> #include <sys/timeb.h> template <typename T> class RngTest : public ::testing::TestWithParam<RngInputs<T>> { protected: void SetUp() override { // Tests are configured with their expected test-values sigma. For example, // 4 x sigma indicates the test shouldn't fail 99.9% of the time. num_sigma = 10; params = ::testing::TestWithParam<RngInputs<T>>::GetParam(); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); Rng r(params.seed, params.gtype); allocate(data, params.len); allocate(stats, 2, true); switch (params.type) { case RNG_Normal: r.normal(data, params.len, params.start, params.end, stream); break; case RNG_LogNormal: r.lognormal(data, params.len, params.start, params.end, stream); break; case RNG_Uniform: r.uniform(data, params.len, params.start, params.end, stream); break; case RNG_Gumbel: r.gumbel(data, params.len, params.start, params.end, stream); break; case RNG_Logistic: r.logistic(data, params.len, params.start, params.end, stream); break; case RNG_Exp: r.exponential(data, params.len, params.start, stream); break; case RNG_Rayleigh: r.rayleigh(data, params.len, params.start, stream); break; case RNG_Laplace: r.laplace(data, params.len, params.start, params.end, stream); break; }; static const int threads = 128; meanKernel<T, threads><<<ceildiv(params.len, threads), threads, 0, 
stream>>>( stats, data, params.len); updateHost<T>(h_stats, stats, 2, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); h_stats[0] /= params.len; h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(stats)); } void getExpectedMeanVar(T meanvar[2]) { switch (params.type) { case RNG_Normal: meanvar[0] = params.start; meanvar[1] = params.end * params.end; break; case RNG_LogNormal: { auto var = params.end * params.end; auto mu = params.start; meanvar[0] = myExp(mu + var * T(0.5)); meanvar[1] = (myExp(var) - T(1.0)) * myExp(T(2.0) * mu + var); break; } case RNG_Uniform: meanvar[0] = (params.start + params.end) * T(0.5); meanvar[1] = params.end - params.start; meanvar[1] = meanvar[1] * meanvar[1] / T(12.0); break; case RNG_Gumbel: { auto gamma = T(0.577215664901532); meanvar[0] = params.start + params.end * gamma; meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(6.0); break; } case RNG_Logistic: meanvar[0] = params.start; meanvar[1] = T(3.1415) * T(3.1415) * params.end * params.end / T(3.0); break; case RNG_Exp: meanvar[0] = T(1.0) / params.start; meanvar[1] = meanvar[0] * meanvar[0]; break; case RNG_Rayleigh: meanvar[0] = params.start * mySqrt(T(3.1415 / 2.0)); meanvar[1] = ((T(4.0) - T(3.1415)) / T(2.0)) * params.start * params.start; break; case RNG_Laplace: meanvar[0] = params.start; meanvar[1] = T(2.0) * params.end * params.end; break; }; } protected: RngInputs<T> params; T *data, *stats; T h_stats[2]; // mean, var int num_sigma; }; // The measured mean and standard deviation for each tested distribution are, // of course, statistical variables. Thus setting an appropriate testing // tolerance essentially requires one to set a probability of test failure. We // choose to set this at 3-4 x sigma, i.e., a 99.7-99.9% confidence interval so that // the test will indeed pass. 
In quick experiments (using the identical // distributions given by NumPy/SciPy), the measured standard deviation is the // variable with the greatest variance and so we determined the variance for // each distribution and number of samples (32*1024 or 8*1024). Below // are listed the standard deviation for these tests. // Distribution: StdDev 32*1024, StdDev 8*1024 // Normal: 0.0055, 0.011 // LogNormal: 0.05, 0.1 // Uniform: 0.003, 0.005 // Gumbel: 0.005, 0.01 // Logistic: 0.005, 0.01 // Exp: 0.008, 0.015 // Rayleigh: 0.0125, 0.025 // Laplace: 0.02, 0.04 // We generally want 4 x sigma >= 99.9% chance of success typedef RngTest<float> RngTestF; const std::vector<RngInputs<float>> inputsf = { {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenPhilox, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenPhilox, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenPhilox, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenPhilox, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenPhilox, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenPhilox, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenPhilox, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenPhilox, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenPhilox, 1234ULL}, {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenTaps, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenTaps, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenTaps, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenTaps, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, 
GenTaps, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenTaps, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenTaps, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenTaps, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenTaps, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenTaps, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenTaps, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenTaps, 1234ULL}, {0.0055 , 32 * 1024, 1.f, 1.f, RNG_Normal, GenKiss99, 1234ULL}, {0.011 , 8 * 1024, 1.f, 1.f, RNG_Normal, GenKiss99, 1234ULL}, {0.05 , 32 * 1024, 1.f, 1.f, RNG_LogNormal, GenKiss99, 1234ULL}, {0.1 , 8 * 1024, 1.f, 1.f, RNG_LogNormal, GenKiss99, 1234ULL}, {0.003 , 32 * 1024, -1.f, 1.f, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 8 * 1024, -1.f, 1.f, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Gumbel, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Gumbel, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.f, 1.f, RNG_Logistic, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.f, 1.f, RNG_Logistic, GenKiss99, 1234ULL}, {0.008 , 32 * 1024, 1.f, 1.f, RNG_Exp, GenKiss99, 1234ULL}, {0.015 , 8 * 1024, 1.f, 1.f, RNG_Exp, GenKiss99, 1234ULL}, {0.0125 , 32 * 1024, 1.f, 1.f, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.025 , 8 * 1024, 1.f, 1.f, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.02 , 32 * 1024, 1.f, 1.f, RNG_Laplace, GenKiss99, 1234ULL}, {0.04 , 8 * 1024, 1.f, 1.f, RNG_Laplace, GenKiss99, 1234ULL}}; TEST_P(RngTestF, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(num_sigma*params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(num_sigma*params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, 
RngTestF, ::testing::ValuesIn(inputsf)); typedef RngTest<double> RngTestD; const std::vector<RngInputs<double>> inputsd = { {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenPhilox, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenPhilox, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenPhilox, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenPhilox, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenPhilox, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenPhilox, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenPhilox, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenPhilox, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenPhilox, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenPhilox, 1234ULL}, {0.02 , 32 * 1024, 1.0, 1.0, RNG_Laplace, GenPhilox, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenPhilox, 1234ULL}, {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenTaps, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenTaps, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenTaps, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenTaps, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenTaps, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenTaps, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenTaps, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenTaps, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenTaps, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenTaps, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenTaps, 1234ULL}, {0.02 , 32 
* 1024, 1.0, 1.0, RNG_Laplace, GenTaps, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenTaps, 1234ULL}, {0.0055 , 32 * 1024, 1.0, 1.0, RNG_Normal, GenKiss99, 1234ULL}, {0.011 , 8 * 1024, 1.0, 1.0, RNG_Normal, GenKiss99, 1234ULL}, {0.05 , 32 * 1024, 1.0, 1.0, RNG_LogNormal, GenKiss99, 1234ULL}, {0.1 , 8 * 1024, 1.0, 1.0, RNG_LogNormal, GenKiss99, 1234ULL}, {0.003 , 32 * 1024, -1.0, 1.0, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 8 * 1024, -1.0, 1.0, RNG_Uniform, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Gumbel, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Gumbel, GenKiss99, 1234ULL}, {0.005 , 32 * 1024, 1.0, 1.0, RNG_Logistic, GenKiss99, 1234ULL}, {0.01 , 8 * 1024, 1.0, 1.0, RNG_Logistic, GenKiss99, 1234ULL}, {0.008 , 32 * 1024, 1.0, 1.0, RNG_Exp, GenKiss99, 1234ULL}, {0.015 , 8 * 1024, 1.0, 1.0, RNG_Exp, GenKiss99, 1234ULL}, {0.0125 , 32 * 1024, 1.0, 1.0, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.025 , 8 * 1024, 1.0, 1.0, RNG_Rayleigh, GenKiss99, 1234ULL}, {0.02 , 32 * 1024, 1.0, 1.0, RNG_Laplace, GenKiss99, 1234ULL}, {0.04 , 8 * 1024, 1.0, 1.0, RNG_Laplace, GenKiss99, 1234ULL}}; TEST_P(RngTestD, Result) { double meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<double>(num_sigma*params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<double>(num_sigma*params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestD, ::testing::ValuesIn(inputsd)); // ---------------------------------------------------------------------- // // Test for expected variance in mean calculations template <typename T> T quick_mean(const std::vector<T>& d) { T acc = T(0); for(const auto& di : d) { acc += di; } return acc/d.size(); } template <typename T> T quick_std(const std::vector<T>& d) { T acc = T(0); T d_mean = quick_mean(d); for(const auto& di : d) { acc += ((di - d_mean)*(di - d_mean)); } return std::sqrt(acc/(d.size()-1)); } template <typename T> std::ostream& operator<< (std::ostream& 
out, const std::vector<T>& v) { if ( !v.empty() ) { out << '['; std::copy (v.begin(), v.end(), std::ostream_iterator<T>(out, ", ")); out << "\b\b]"; } return out; } // The following tests the 3 random number generators by checking that the // measured mean error is close to the well-known analytical result // (sigma/sqrt(n_samples)). To compute the mean error, we a number of // experiments computing the mean, giving us a distribution of the mean // itself. The mean error is simply the standard deviation of this // distribution (the standard deviation of the mean). TEST(Rng, MeanError) { timeb time_struct; ftime(&time_struct); int seed = time_struct.millitm; int num_samples = 1024; int num_experiments = 1024; float* data; float* mean_result; float* std_result; int len = num_samples*num_experiments; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, len); allocate(mean_result, num_experiments); allocate(std_result, num_experiments); for(auto rtype : {Random::GenPhilox, Random::GenKiss99 /*, Random::GenTaps */}) { Random::Rng r(seed, rtype); r.normal(data, len, 3.3f, 0.23f, stream); // r.uniform(data, len, -1.0, 2.0); Stats::mean(mean_result, data, num_samples, num_experiments, false, false, stream); Stats::stddev(std_result, data, mean_result, num_samples, num_experiments, false, false, stream); std::vector<float> h_mean_result(num_experiments); std::vector<float> h_std_result(num_experiments); updateHost(h_mean_result.data(), mean_result, num_experiments, stream); updateHost(h_std_result.data(), std_result, num_experiments, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); auto d_mean = quick_mean(h_mean_result); // std-dev of mean; also known as mean error auto d_std_of_mean = quick_std(h_mean_result); auto d_std = quick_mean(h_std_result); auto d_std_of_mean_analytical = d_std/std::sqrt(num_samples); // std::cout << "measured mean error: " << d_std_of_mean << "\n"; // std::cout << "expected mean error: " << d_std/std::sqrt(num_samples) << 
"\n"; auto diff_expected_vs_measured_mean_error = std::abs(d_std_of_mean - d_std/std::sqrt(num_samples)); ASSERT_TRUE((diff_expected_vs_measured_mean_error/d_std_of_mean_analytical < 0.5)); } CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(mean_result)); CUDA_CHECK(cudaFree(std_result)); // std::cout << "mean_res:" << h_mean_result << "\n"; } template<typename T, int len, int scale> class ScaledBernoulliTest : public ::testing::Test { protected: void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); Rng r(42); allocate(data, len * sizeof(T), stream); r.scaled_bernoulli(data, len, T(0.5), T(scale), stream); } void TearDown() override { CUDA_CHECK(cudaFree(data)); } void rangeCheck() { T* h_data = new T[len]; updateHost(h_data, data, len, stream); ASSERT_TRUE(std::none_of(h_data, h_data + len, [](const T& a) { return a < -scale || a > scale; })); delete[] h_data; } T* data; cudaStream_t stream; }; typedef ScaledBernoulliTest<float, 500, 35> ScaledBernoulliTest1; TEST_F(ScaledBernoulliTest1, RangeCheck) { rangeCheck(); } typedef ScaledBernoulliTest<double, 100, 220> ScaledBernoulliTest2; TEST_F(ScaledBernoulliTest2, RangeCheck) { rangeCheck(); } } // end namespace Random } // end namespace MLCommon
30c65f07ca7f01c10acdab377cb3f41c7311a89a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel0 (dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1; s < blockDim.x; s = s << 1) { if((threadIdx.x % (2 * s)) == 0) { //modify scratch[threadIdx.x] += scratch[threadIdx.x + s]; } __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* 
timer */ struct stopwatch_t* timer = NULL; long double t_kernel_0, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 0; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ hipLaunchKernelGGL(( kernel0) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); hipDeviceSynchronize (); stopwatch_start (timer); /* execute kernel */ hipLaunchKernelGGL(( kernel0) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); hipLaunchKernelGGL(( kernel0) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s); s = (s + threads - 1) / threads; } hipDeviceSynchronize (); t_kernel_0 = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive GPU reduction kernel_Part_0: %Lg secs\n", t_kernel_0); double bw = (N * sizeof(dtype)) / (t_kernel_0 * 1e9); // calculate bandwidth fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype), hipMemcpyDeviceToHost)); /* 
================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
30c65f07ca7f01c10acdab377cb3f41c7311a89a.cu
#include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel0 (dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1; s < blockDim.x; s = s << 1) { if((threadIdx.x % (2 * s)) == 0) { //modify scratch[threadIdx.x] += scratch[threadIdx.x + s]; } __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_0, t_cpu; /* which 
kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 0; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ kernel0 <<<gb, tb>>> (d_idata, d_odata, N); cudaThreadSynchronize (); stopwatch_start (timer); /* execute kernel */ kernel0 <<<gb, tb>>> (d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); kernel0 <<<gb, tb>>> (d_odata, d_odata, s); s = (s + threads - 1) / threads; } cudaThreadSynchronize (); t_kernel_0 = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive GPU reduction kernel_Part_0: %Lg secs\n", t_kernel_0); double bw = (N * sizeof(dtype)) / (t_kernel_0 * 1e9); // calculate bandwidth fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); 
t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
816d5ac65bfa9d299538358c170d6d2ca5345649.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "add_spatial_softmax_op.h" namespace caffe2 { namespace { __global__ void AddSpatialSoftmaxKernel(const int N, const int A, const int pixels, const float* Xdata, float* Pdata, const int num_classes) { // Loop throuh labels (N x A x H x W) CUDA_1D_KERNEL_LOOP(index, N * A * pixels) { const int DP = num_classes * A; const int D = DP - A; const int num_dim = num_classes - 1; const int p = index % pixels; const int a = (index / pixels) % A; const int i = index / (pixels * A); float max_val = 0; const int start_bottom = a * num_dim; const int end_bottom = start_bottom + num_dim; const int start_top = a * num_classes; const int end_top= start_top + num_classes; // Subtract max on each cell for numerical reasons for(int c = start_bottom; c < end_bottom; ++c) { int idx = (i * D + c) * pixels + p; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = exp(-max_val); int tc = start_top; Pdata[(i * DP + tc) * pixels + p] = expsum; for(int c = start_bottom; c < end_bottom; ++c) { int idx = (i * D + c) * pixels + p; float expx = exp(Xdata[idx] - max_val); tc ++; int tidx = (i * DP + tc) * pixels + p; Pdata[tidx] = expx; expsum += expx; } // Normalize for(int tc = start_top; tc < end_top; ++tc) { int tidx = (i * DP + tc) * pixels + p; Pdata[tidx] /= expsum; } } } __global__ void DeCopyKernel(const int num, const int A, const int pixels, const float* dYdata, float* dXdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, num) { int idx = index; const int num_dim = num_classes - 1; const int p = idx % pixels; idx /= pixels; const int c = idx % num_dim; idx /= num_dim; const int target_index = (idx * num_classes + c + 1) * pixels + p; dXdata[index] = dYdata[target_index]; } } __global__ void SumProbsKernel(const int N, const int A, const int pixels, const float* Ydata, const float* 
dYdata, float* sum_probs_data, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, N * A * pixels) { int DP = num_classes * A; int p = index % pixels; int a = (index / pixels) % A; int i = index / (pixels * A); const int start_top = a * num_classes; const int end_top= start_top + num_classes; float sum = 0.; for(int c = start_top; c < end_top; ++c) { int tidx = (i * DP + c) * pixels + p; sum += Ydata[tidx] * dYdata[tidx]; } sum_probs_data[index] = sum; } } __global__ void SubSumKernel(const int N, const int A, const int pixels, const float* sum_probs_data, float* dXdata, const int num_dim) { CUDA_1D_KERNEL_LOOP(index, N * A * num_dim * pixels) { const int p = index % pixels; const int idx = index / (pixels * num_dim); const int sidx = idx * pixels + p; dXdata[index] = dXdata[index] - sum_probs_data[sidx]; } } __global__ void DeMulKernel(const int num, const int A, const int pixels, const float* Ydata, float* dXdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, num) { int idx = index; const int num_dim = num_classes - 1; const int p = idx % pixels; idx /= pixels; const int c = idx % num_dim; idx /= num_dim; const int target_index = (idx * num_classes + c + 1) * pixels + p; dXdata[index] *= Ydata[target_index]; } } } // namespace template <> bool AddSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto* P = Output(0); // Probabilities from softmax const int N = X.dim32(0); const int D = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const int pixels = H * W; const int A = D / (num_classes_ - 1); // additional dimension const int DP = D + A; P->Resize(N, DP, H, W); DCHECK_EQ(X.ndim(), 4); const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location hipLaunchKernelGGL(( AddSpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, pixels, Xdata, Pdata, num_classes_); return true; } template<> bool 
AddSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); // Probabilities from softmax auto& dY = Input(1); auto* dX = Output(0); DCHECK_EQ(Y.ndim(), 4); const int N = Y.dim32(0); const int DP = Y.dim32(1); const int H = Y.dim32(2); const int W = Y.dim32(3); const int pixels = H * W; // there is a default one -- 0 const int A = DP / num_classes_; const int D = DP - A; dX->Resize(N, D, H, W); const int size_sum_probs = N * A * pixels; if (sum_probs_.size() != size_sum_probs) { sum_probs_.Resize(size_sum_probs); } const float* Ydata = Y.data<float>(); const float* dYdata = dY.data<float>(); float* dXdata = dX->mutable_data<float>(); float* sum_probs_data = sum_probs_.mutable_data<float>(); // Complete math: // J_ij = h_i (delta_ij - h_j) // d x_i = sum_j d h_ij = sum_j J_ij * dy_j // = sum_j h_i (delta_ij - h_j) * dy_j // = h_i dy_i - (sum_j h_i h_j dy_j) // = h_i dy_i - h_i sum_j h_j dy_j // Step 0: dx = dy hipLaunchKernelGGL(( DeCopyKernel), dim3(CAFFE_GET_BLOCKS(dX->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dX->size(), A, pixels, dYdata, dXdata, num_classes_); // Step 1: s = Sum(dY[j] * Y[j]) hipLaunchKernelGGL(( SumProbsKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, pixels, Ydata, dYdata, sum_probs_data, num_classes_); // Step 2: dX[i] = dX[i] - s hipLaunchKernelGGL(( SubSumKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, pixels, sum_probs_.data<float>(), dXdata, num_classes_-1); // Step 3: dX[i] = Y[i] * dX[i] hipLaunchKernelGGL(( DeMulKernel), dim3(CAFFE_GET_BLOCKS(dX->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dX->size(), A, pixels, Ydata, dXdata, num_classes_); return true; } REGISTER_CUDA_OPERATOR(AddSpatialSoftmax, AddSpatialSoftmaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(AddSpatialSoftmaxGradient, AddSpatialSoftmaxGradientOp<float, CUDAContext>); } // namespace caffe2
816d5ac65bfa9d299538358c170d6d2ca5345649.cu
#include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "add_spatial_softmax_op.h" namespace caffe2 { namespace { __global__ void AddSpatialSoftmaxKernel(const int N, const int A, const int pixels, const float* Xdata, float* Pdata, const int num_classes) { // Loop throuh labels (N x A x H x W) CUDA_1D_KERNEL_LOOP(index, N * A * pixels) { const int DP = num_classes * A; const int D = DP - A; const int num_dim = num_classes - 1; const int p = index % pixels; const int a = (index / pixels) % A; const int i = index / (pixels * A); float max_val = 0; const int start_bottom = a * num_dim; const int end_bottom = start_bottom + num_dim; const int start_top = a * num_classes; const int end_top= start_top + num_classes; // Subtract max on each cell for numerical reasons for(int c = start_bottom; c < end_bottom; ++c) { int idx = (i * D + c) * pixels + p; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = exp(-max_val); int tc = start_top; Pdata[(i * DP + tc) * pixels + p] = expsum; for(int c = start_bottom; c < end_bottom; ++c) { int idx = (i * D + c) * pixels + p; float expx = exp(Xdata[idx] - max_val); tc ++; int tidx = (i * DP + tc) * pixels + p; Pdata[tidx] = expx; expsum += expx; } // Normalize for(int tc = start_top; tc < end_top; ++tc) { int tidx = (i * DP + tc) * pixels + p; Pdata[tidx] /= expsum; } } } __global__ void DeCopyKernel(const int num, const int A, const int pixels, const float* dYdata, float* dXdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, num) { int idx = index; const int num_dim = num_classes - 1; const int p = idx % pixels; idx /= pixels; const int c = idx % num_dim; idx /= num_dim; const int target_index = (idx * num_classes + c + 1) * pixels + p; dXdata[index] = dYdata[target_index]; } } __global__ void SumProbsKernel(const int N, const int A, const int pixels, const float* Ydata, const float* dYdata, float* sum_probs_data, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, N * A * 
pixels) { int DP = num_classes * A; int p = index % pixels; int a = (index / pixels) % A; int i = index / (pixels * A); const int start_top = a * num_classes; const int end_top= start_top + num_classes; float sum = 0.; for(int c = start_top; c < end_top; ++c) { int tidx = (i * DP + c) * pixels + p; sum += Ydata[tidx] * dYdata[tidx]; } sum_probs_data[index] = sum; } } __global__ void SubSumKernel(const int N, const int A, const int pixels, const float* sum_probs_data, float* dXdata, const int num_dim) { CUDA_1D_KERNEL_LOOP(index, N * A * num_dim * pixels) { const int p = index % pixels; const int idx = index / (pixels * num_dim); const int sidx = idx * pixels + p; dXdata[index] = dXdata[index] - sum_probs_data[sidx]; } } __global__ void DeMulKernel(const int num, const int A, const int pixels, const float* Ydata, float* dXdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, num) { int idx = index; const int num_dim = num_classes - 1; const int p = idx % pixels; idx /= pixels; const int c = idx % num_dim; idx /= num_dim; const int target_index = (idx * num_classes + c + 1) * pixels + p; dXdata[index] *= Ydata[target_index]; } } } // namespace template <> bool AddSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto* P = Output(0); // Probabilities from softmax const int N = X.dim32(0); const int D = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const int pixels = H * W; const int A = D / (num_classes_ - 1); // additional dimension const int DP = D + A; P->Resize(N, DP, H, W); DCHECK_EQ(X.ndim(), 4); const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location AddSpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, pixels, Xdata, Pdata, num_classes_); return true; } template<> bool AddSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); // Probabilities from softmax auto& dY = 
Input(1); auto* dX = Output(0); DCHECK_EQ(Y.ndim(), 4); const int N = Y.dim32(0); const int DP = Y.dim32(1); const int H = Y.dim32(2); const int W = Y.dim32(3); const int pixels = H * W; // there is a default one -- 0 const int A = DP / num_classes_; const int D = DP - A; dX->Resize(N, D, H, W); const int size_sum_probs = N * A * pixels; if (sum_probs_.size() != size_sum_probs) { sum_probs_.Resize(size_sum_probs); } const float* Ydata = Y.data<float>(); const float* dYdata = dY.data<float>(); float* dXdata = dX->mutable_data<float>(); float* sum_probs_data = sum_probs_.mutable_data<float>(); // Complete math: // J_ij = h_i (delta_ij - h_j) // d x_i = sum_j d h_ij = sum_j J_ij * dy_j // = sum_j h_i (delta_ij - h_j) * dy_j // = h_i dy_i - (sum_j h_i h_j dy_j) // = h_i dy_i - h_i sum_j h_j dy_j // Step 0: dx = dy DeCopyKernel<<<CAFFE_GET_BLOCKS(dX->size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(dX->size(), A, pixels, dYdata, dXdata, num_classes_); // Step 1: s = Sum(dY[j] * Y[j]) SumProbsKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, pixels, Ydata, dYdata, sum_probs_data, num_classes_); // Step 2: dX[i] = dX[i] - s SubSumKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, pixels, sum_probs_.data<float>(), dXdata, num_classes_-1); // Step 3: dX[i] = Y[i] * dX[i] DeMulKernel<<<CAFFE_GET_BLOCKS(dX->size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(dX->size(), A, pixels, Ydata, dXdata, num_classes_); return true; } REGISTER_CUDA_OPERATOR(AddSpatialSoftmax, AddSpatialSoftmaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(AddSpatialSoftmaxGradient, AddSpatialSoftmaxGradientOp<float, CUDAContext>); } // namespace caffe2
5bc5f175177d5ee1d100d119e76f96b63c23998c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <sys/time.h> #define HAVE_LINUX_PERF_EVENT_H 1 extern "C" { #include "perfstats.h" } #include <fcntl.h> #include <unistd.h> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); #ifdef NONBLOCKING #include <pthread.h> struct read_parameter { float *vect; int grid_rows; int grid_cols; char *file; float *d_memory; }; void *readinput_thread(void *x); #endif /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) #ifdef GPUD #ifdef THREADED #ifdef DRAM #define NVMED_INIT(a) nvmed_init(6) #define NVMED_SEND(a,b,c,d) nvmed_host_pipeline_send(a, b, c, d, NULL) #define NVMED_RECV(a,b,c,d) nvmed_host_pipeline_recv(a,b,c,d) #else #define NVMED_INIT(a) nvmed_init(6) #define NVMED_SEND(a,b,c,d) nvmed_send_threaded(a,b,c,d) #define NVMED_RECV(a,b,c,d) nvmed_recv_threaded(a,b,c,d) #endif #else #define NVMED_INIT(a) nvmed_init(a) #define NVMED_SEND(a,b,c,d) nvmed_send(a,b,c,d) #define NVMED_RECV(a,b,c,d) nvmed_recv(a,b,c,d) #endif #endif void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char 
*file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; #ifdef OUTPUT_TO_BINARY #ifdef GPUD // fprintf(stderr,"%d %d %llu\n",grid_rows,grid_cols,grid_rows*grid_cols*sizeof(float)); nvmed_recv(file, vect, grid_rows*grid_cols*sizeof(float), 0); #else int fd = open(file, O_CREAT | O_WRONLY | O_SYNC, 0666); fp = fdopen(fd,"wb"); // fprintf(stderr,"%d %d %llu\n",grid_rows,grid_cols,grid_rows*grid_cols*sizeof(float)); /* for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { fwrite(&vect[i*grid_cols+j],sizeof(float),1,fp); } */ fwrite(vect, sizeof(float), grid_rows*grid_cols, fp); // fprintf(stderr,"Here\n"); fflush(fp); fclose(fp); close(fd); #endif #else if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); #endif } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; #ifdef READING_FROM_BINARY if( (fp = fopen(file, "rb" )) ==0 ) printf( "The file was not opened\n" ); fread(vect,sizeof(float),grid_rows*grid_cols,fp); // fclose(fp); #else if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } #endif fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? 
(a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation 
outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); 
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); struct timeval time_start, time_end; gettimeofday(&time_start, NULL); run(argc,argv); gettimeofday(&time_end, NULL); printf("HGProfile: Total %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations struct timeval time_start, time_end; gettimeofday(&time_start, NULL); // perfstats_init(); // perfstats_enable(); if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = 
atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); if( !FilesavingPower || !FilesavingTemp ) fatal("unable to allocate memory"); MatrixOut = (float *) calloc (size, sizeof(float)); if( !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); float *MatrixTemp[2], *MatrixPower; readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); // gettimeofday(&time_end, NULL); // printf("HGProfile: FileInput %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); // gettimeofday(&time_start, NULL); hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size); hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size); hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice); hipMalloc((void**)&MatrixPower, sizeof(float)*size); hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAMemcpyHD 
%d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAKernel %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAMemcpyDH %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); gettimeofday(&time_end, NULL); // printf("HGProfile: FileOutput %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); hipFree(MatrixPower); hipFree(MatrixTemp[0]); hipFree(MatrixTemp[1]); free(MatrixOut); // perfstats_disable(); // perfstats_print(); // perfstats_deinit(); }
5bc5f175177d5ee1d100d119e76f96b63c23998c.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <sys/time.h> #define HAVE_LINUX_PERF_EVENT_H 1 extern "C" { #include "perfstats.h" } #include <fcntl.h> #include <unistd.h> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); #ifdef NONBLOCKING #include <pthread.h> struct read_parameter { float *vect; int grid_rows; int grid_cols; char *file; float *d_memory; }; void *readinput_thread(void *x); #endif /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) #ifdef GPUD #ifdef THREADED #ifdef DRAM #define NVMED_INIT(a) nvmed_init(6) #define NVMED_SEND(a,b,c,d) nvmed_host_pipeline_send(a, b, c, d, NULL) #define NVMED_RECV(a,b,c,d) nvmed_host_pipeline_recv(a,b,c,d) #else #define NVMED_INIT(a) nvmed_init(6) #define NVMED_SEND(a,b,c,d) nvmed_send_threaded(a,b,c,d) #define NVMED_RECV(a,b,c,d) nvmed_recv_threaded(a,b,c,d) #endif #else #define NVMED_INIT(a) nvmed_init(a) #define NVMED_SEND(a,b,c,d) nvmed_send(a,b,c,d) #define NVMED_RECV(a,b,c,d) nvmed_recv(a,b,c,d) #endif #endif void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; #ifdef OUTPUT_TO_BINARY #ifdef GPUD 
// fprintf(stderr,"%d %d %llu\n",grid_rows,grid_cols,grid_rows*grid_cols*sizeof(float)); nvmed_recv(file, vect, grid_rows*grid_cols*sizeof(float), 0); #else int fd = open(file, O_CREAT | O_WRONLY | O_SYNC, 0666); fp = fdopen(fd,"wb"); // fprintf(stderr,"%d %d %llu\n",grid_rows,grid_cols,grid_rows*grid_cols*sizeof(float)); /* for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { fwrite(&vect[i*grid_cols+j],sizeof(float),1,fp); } */ fwrite(vect, sizeof(float), grid_rows*grid_cols, fp); // fprintf(stderr,"Here\n"); fflush(fp); fclose(fp); close(fd); #endif #else if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); #endif } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; #ifdef READING_FROM_BINARY if( (fp = fopen(file, "rb" )) ==0 ) printf( "The file was not opened\n" ); fread(vect,sizeof(float),grid_rows*grid_cols,fp); // fclose(fp); #else if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } #endif fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? 
(a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation 
outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); 
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); struct timeval time_start, time_end; gettimeofday(&time_start, NULL); run(argc,argv); gettimeofday(&time_end, NULL); printf("HGProfile: Total %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations struct timeval time_start, time_end; gettimeofday(&time_start, NULL); // perfstats_init(); // perfstats_enable(); if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = 
atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); if( !FilesavingPower || !FilesavingTemp ) fatal("unable to allocate memory"); MatrixOut = (float *) calloc (size, sizeof(float)); if( !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); float *MatrixTemp[2], *MatrixPower; readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); // gettimeofday(&time_end, NULL); // printf("HGProfile: FileInput %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); // gettimeofday(&time_start, NULL); cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size); cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size); cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice); cudaMalloc((void**)&MatrixPower, sizeof(float)*size); cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAMemcpyHD %d\n",((time_end.tv_sec * 1000000 + 
time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAKernel %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost); gettimeofday(&time_end, NULL); // printf("HGProfile: CUDAMemcpyDH %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); gettimeofday(&time_start, NULL); gettimeofday(&time_end, NULL); // printf("HGProfile: FileOutput %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))); cudaFree(MatrixPower); cudaFree(MatrixTemp[0]); cudaFree(MatrixTemp[1]); free(MatrixOut); // perfstats_disable(); // perfstats_print(); // perfstats_deinit(); }
a23e790153fb5bd504dbc166e86a2cc630913bc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ hipComplex tans(hipComplex m) { 
return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn 
= enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex 
Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, 
hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the 
hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + 
qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 5; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 
0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<20;v++) { cue = cue - hilva(aon*sins(cue)-uon*cosc(cue))/(aon*cosc(cue)-uon*sins(cue)); } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - 
aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
a23e790153fb5bd504dbc166e86a2cc630913bc6.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex 
moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; 
cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex 
thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex 
frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 5; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); 
cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<20;v++) { cue = cue - hilva(aon*sins(cue)-uon*cosc(cue))/(aon*cosc(cue)-uon*sins(cue)); } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d 
Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
2f374be954012b3fda599cb8700b6473e285988f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 
12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <rocblas.h> #include <hip/hip_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { hipblasHandle_t cublasH = NULL; hipStream_t stream = NULL; const int m = 2; const int n = 2; const int k = 2; const int lda = 2; const int ldb = 2; const int ldc = 2; const int batch_count = 2; /* * A = | 1.0 | 2.0 | 5.0 | 6.0 | * | 3.0 | 4.0 | 7.0 | 8.0 | * * B = | 5.0 | 6.0 | 9.0 | 10.0 | * | 7.0 | 8.0 | 11.0 | 12.0 | */ const std::vector<std::vector<data_type>> A_array = {{1.0, 3.0, 2.0, 4.0}, {5.0, 7.0, 6.0, 8.0}}; const std::vector<std::vector<data_type>> B_array = {{5.0, 7.0, 6.0, 8.0}, {9.0, 11.0, 10.0, 12.0}}; std::vector<std::vector<data_type>> C_array(batch_count, std::vector<data_type>(m * n)); const data_type alpha = 1.0; const data_type beta = 0.0; data_type **d_A_array = nullptr; data_type **d_B_array = nullptr; data_type **d_C_array = nullptr; std::vector<data_type *> d_A(batch_count, nullptr); std::vector<data_type *> d_B(batch_count, nullptr); std::vector<data_type *> d_C(batch_count, nullptr); hipblasOperation_t transa = HIPBLAS_OP_N; hipblasOperation_t transb = HIPBLAS_OP_N; hipblasComputeType_t compute_type = CUBLAS_COMPUTE_64F; printf("A[0]\n"); print_matrix(m, k, A_array[0].data(), lda); printf("=====\n"); printf("A[1]\n"); print_matrix(m, k, A_array[1].data(), lda); printf("=====\n"); printf("B[0]\n"); print_matrix(k, n, B_array[0].data(), ldb); 
printf("=====\n"); printf("B[1]\n"); print_matrix(k, n, B_array[1].data(), ldb); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(hipblasCreate(&cublasH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUBLAS_CHECK(hipblasSetStream(cublasH, stream)); /* step 2: copy data to device */ for (int i = 0; i < batch_count; i++) { CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_A[i]), sizeof(data_type) * A_array[i].size())); CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_B[i]), sizeof(data_type) * B_array[i].size())); CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_C[i]), sizeof(data_type) * C_array[i].size())); } CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_A_array), sizeof(data_type *) * batch_count)); CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_B_array), sizeof(data_type *) * batch_count)); CUDA_CHECK( hipMalloc(reinterpret_cast<void **>(&d_C_array), sizeof(data_type *) * batch_count)); for (int i = 0; i < batch_count; i++) { CUDA_CHECK(hipMemcpyAsync(d_A[i], A_array[i].data(), sizeof(data_type) * A_array[i].size(), hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_B[i], B_array[i].data(), sizeof(data_type) * B_array[i].size(), hipMemcpyHostToDevice, stream)); } CUDA_CHECK(hipMemcpyAsync(d_A_array, d_A.data(), sizeof(data_type *) * batch_count, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_B_array, d_B.data(), sizeof(data_type *) * batch_count, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_C_array, d_C.data(), sizeof(data_type *) * batch_count, hipMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(hipblasGemmBatchedEx(cublasH, transa, transb, m, n, k, &alpha, d_A_array, traits<data_type>::cuda_data_type, lda, d_B_array, traits<data_type>::cuda_data_type, ldb, &beta, d_C_array, traits<data_type>::cuda_data_type, ldc, batch_count, compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); /* step 4: copy data to host */ for (int i = 0; i < batch_count; 
i++) { CUDA_CHECK(hipMemcpyAsync(C_array[i].data(), d_C[i], sizeof(data_type) * C_array[i].size(), hipMemcpyDeviceToHost, stream)); } CUDA_CHECK(hipStreamSynchronize(stream)); /* * C = | 19.0 | 43.0 | 111.0 | 151.0 | * | 22.0 | 50.0 | 122.0 | 166.0 | */ printf("C[0]\n"); print_matrix(m, n, C_array[0].data(), ldc); printf("=====\n"); printf("C[1]\n"); print_matrix(m, n, C_array[1].data(), ldc); printf("=====\n"); /* free resources */ CUDA_CHECK(hipFree(d_A_array)); CUDA_CHECK(hipFree(d_B_array)); CUDA_CHECK(hipFree(d_C_array)); for (int i = 0; i < batch_count; i++) { CUDA_CHECK(hipFree(d_A[i])); CUDA_CHECK(hipFree(d_B[i])); CUDA_CHECK(hipFree(d_C[i])); } CUBLAS_CHECK(hipblasDestroy(cublasH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
2f374be954012b3fda599cb8700b6473e285988f.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <cublas_v2.h> #include <cuda_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { cublasHandle_t cublasH = NULL; cudaStream_t stream = NULL; const int m = 2; const int n = 2; const int k = 2; const int lda = 2; const int ldb = 2; const int ldc = 2; const int batch_count = 2; /* * A = | 1.0 | 2.0 | 5.0 | 6.0 | * | 3.0 | 4.0 | 7.0 | 8.0 | * * B = | 5.0 | 6.0 | 9.0 | 10.0 | * | 7.0 | 8.0 | 11.0 | 12.0 | */ const std::vector<std::vector<data_type>> A_array = {{1.0, 3.0, 2.0, 4.0}, {5.0, 7.0, 6.0, 8.0}}; const std::vector<std::vector<data_type>> B_array = {{5.0, 7.0, 6.0, 8.0}, {9.0, 11.0, 10.0, 12.0}}; std::vector<std::vector<data_type>> C_array(batch_count, std::vector<data_type>(m * n)); const data_type alpha = 1.0; const data_type beta = 0.0; data_type **d_A_array = nullptr; data_type **d_B_array = nullptr; data_type **d_C_array = nullptr; std::vector<data_type *> d_A(batch_count, nullptr); std::vector<data_type *> d_B(batch_count, nullptr); std::vector<data_type *> d_C(batch_count, nullptr); cublasOperation_t transa = CUBLAS_OP_N; cublasOperation_t transb = CUBLAS_OP_N; cublasComputeType_t compute_type = CUBLAS_COMPUTE_64F; printf("A[0]\n"); print_matrix(m, k, A_array[0].data(), lda); printf("=====\n"); printf("A[1]\n"); print_matrix(m, k, A_array[1].data(), lda); printf("=====\n"); printf("B[0]\n"); print_matrix(k, n, B_array[0].data(), ldb); printf("=====\n"); printf("B[1]\n"); print_matrix(k, n, 
B_array[1].data(), ldb); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(cublasCreate(&cublasH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUBLAS_CHECK(cublasSetStream(cublasH, stream)); /* step 2: copy data to device */ for (int i = 0; i < batch_count; i++) { CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_A[i]), sizeof(data_type) * A_array[i].size())); CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_B[i]), sizeof(data_type) * B_array[i].size())); CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_C[i]), sizeof(data_type) * C_array[i].size())); } CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_A_array), sizeof(data_type *) * batch_count)); CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_B_array), sizeof(data_type *) * batch_count)); CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&d_C_array), sizeof(data_type *) * batch_count)); for (int i = 0; i < batch_count; i++) { CUDA_CHECK(cudaMemcpyAsync(d_A[i], A_array[i].data(), sizeof(data_type) * A_array[i].size(), cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_B[i], B_array[i].data(), sizeof(data_type) * B_array[i].size(), cudaMemcpyHostToDevice, stream)); } CUDA_CHECK(cudaMemcpyAsync(d_A_array, d_A.data(), sizeof(data_type *) * batch_count, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_B_array, d_B.data(), sizeof(data_type *) * batch_count, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_C_array, d_C.data(), sizeof(data_type *) * batch_count, cudaMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(cublasGemmBatchedEx(cublasH, transa, transb, m, n, k, &alpha, d_A_array, traits<data_type>::cuda_data_type, lda, d_B_array, traits<data_type>::cuda_data_type, ldb, &beta, d_C_array, traits<data_type>::cuda_data_type, ldc, batch_count, compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); /* step 4: copy data to host */ for (int i = 0; i < batch_count; i++) { 
CUDA_CHECK(cudaMemcpyAsync(C_array[i].data(), d_C[i], sizeof(data_type) * C_array[i].size(), cudaMemcpyDeviceToHost, stream)); } CUDA_CHECK(cudaStreamSynchronize(stream)); /* * C = | 19.0 | 43.0 | 111.0 | 151.0 | * | 22.0 | 50.0 | 122.0 | 166.0 | */ printf("C[0]\n"); print_matrix(m, n, C_array[0].data(), ldc); printf("=====\n"); printf("C[1]\n"); print_matrix(m, n, C_array[1].data(), ldc); printf("=====\n"); /* free resources */ CUDA_CHECK(cudaFree(d_A_array)); CUDA_CHECK(cudaFree(d_B_array)); CUDA_CHECK(cudaFree(d_C_array)); for (int i = 0; i < batch_count; i++) { CUDA_CHECK(cudaFree(d_A[i])); CUDA_CHECK(cudaFree(d_B[i])); CUDA_CHECK(cudaFree(d_C[i])); } CUBLAS_CHECK(cublasDestroy(cublasH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
cfe4035054f7713367205f884649a1b543cb9cae.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/matrix/matrix.hpp> #include <rmm/device_uvector.hpp> #include <solver/cd.cuh> #include <test_utils.h> #include <raft/stats/mean.hpp> #include <raft/stats/meanvar.hpp> #include <raft/stats/stddev.hpp> namespace ML { namespace Solver { using namespace MLCommon; template <typename T> struct CdInputs { T tol; int n_row; int n_col; }; template <typename T> class CdTest : public ::testing::TestWithParam<CdInputs<T>> { public: CdTest() : params(::testing::TestWithParam<CdInputs<T>>::GetParam()), stream(handle.get_stream()), data(params.n_row * params.n_col, stream), labels(params.n_row, stream), coef(params.n_col, stream), coef2(params.n_col, stream), coef3(params.n_col, stream), coef4(params.n_col, stream), coef_ref(params.n_col, stream), coef2_ref(params.n_col, stream), coef3_ref(params.n_col, stream), coef4_ref(params.n_col, stream) { RAFT_CUDA_TRY(hipMemsetAsync(coef.data(), 0, coef.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef2.data(), 0, coef2.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef3.data(), 0, coef3.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef4.data(), 0, coef4.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef_ref.data(), 0, coef_ref.size() * sizeof(T), stream)); 
RAFT_CUDA_TRY(hipMemsetAsync(coef2_ref.data(), 0, coef2_ref.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef3_ref.data(), 0, coef3_ref.size() * sizeof(T), stream)); RAFT_CUDA_TRY(hipMemsetAsync(coef4_ref.data(), 0, coef4_ref.size() * sizeof(T), stream)); } protected: void lasso() { int len = params.n_row * params.n_col; T data_h[len] = {1.0, 1.2, 2.0, 2.0, 4.5, 2.0, 2.0, 3.0}; raft::update_device(data.data(), data_h, len, stream); T labels_h[params.n_row] = {6.0, 8.3, 9.8, 11.2}; raft::update_device(labels.data(), labels_h, params.n_row, stream); /* How to reproduce the coefficients for this test: from sklearn.preprocessing import StandardScaler scaler = StandardScaler(with_mean=True, with_std=True) x_norm = scaler.fit_transform(data_h) m = ElasticNet(fit_intercept=, normalize=, alpha=, l1_ratio=) m.fit(x_norm, y) print(m.coef_ / scaler.scale_ if normalize else m.coef_) */ T coef_ref_h[params.n_col] = {4.90832, 0.35031}; raft::update_device(coef_ref.data(), coef_ref_h, params.n_col, stream); T coef2_ref_h[params.n_col] = {2.53530, -0.36832}; raft::update_device(coef2_ref.data(), coef2_ref_h, params.n_col, stream); T coef3_ref_h[params.n_col] = {2.932841, 1.15248}; raft::update_device(coef3_ref.data(), coef3_ref_h, params.n_col, stream); T coef4_ref_h[params.n_col] = {1.75420431, -0.16215289}; raft::update_device(coef4_ref.data(), coef4_ref_h, params.n_col, stream); bool fit_intercept = false; bool normalize = false; int epochs = 200; T alpha = T(0.2); T l1_ratio = T(1.0); bool shuffle = false; T tol = T(1e-4); ML::loss_funct loss = ML::loss_funct::SQRD_LOSS; intercept = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef.data(), &intercept, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); fit_intercept = true; intercept2 = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef2.data(), &intercept2, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, 
tol, stream); alpha = T(1.0); l1_ratio = T(0.5); fit_intercept = false; intercept = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef3.data(), &intercept, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); fit_intercept = true; normalize = true; intercept2 = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef4.data(), &intercept2, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); } void SetUp() override { lasso(); } protected: CdInputs<T> params; raft::handle_t handle; hipStream_t stream = 0; rmm::device_uvector<T> data, labels, coef, coef_ref; rmm::device_uvector<T> coef2, coef2_ref; rmm::device_uvector<T> coef3, coef3_ref; rmm::device_uvector<T> coef4, coef4_ref; T intercept, intercept2; }; const std::vector<CdInputs<float>> inputsf2 = {{0.01f, 4, 2}}; const std::vector<CdInputs<double>> inputsd2 = {{0.01, 4, 2}}; typedef CdTest<float> CdTestF; TEST_P(CdTestF, Fit) { ASSERT_TRUE(raft::devArrMatch( coef_ref.data(), coef.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef2_ref.data(), coef2.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef3_ref.data(), coef3.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); rmm::device_uvector<float> means_1(params.n_col, stream); rmm::device_uvector<float> means_2(params.n_col, stream); rmm::device_uvector<float> vars_1(params.n_col, stream); rmm::device_uvector<float> vars_2(params.n_col, stream); raft::stats::mean(means_1.data(), data.data(), params.n_col, params.n_row, false, false, stream); raft::stats::vars( vars_1.data(), data.data(), means_1.data(), params.n_col, params.n_row, false, false, stream); raft::stats::meanvar( means_2.data(), vars_2.data(), data.data(), params.n_col, params.n_row, false, false, stream); ASSERT_TRUE(raft::devArrMatch( means_1.data(), means_2.data(), 
params.n_col, raft::CompareApprox<float>(0.0001))); ASSERT_TRUE(raft::devArrMatch( vars_1.data(), vars_2.data(), params.n_col, raft::CompareApprox<float>(0.0001))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); } typedef CdTest<double> CdTestD; TEST_P(CdTestD, Fit) { ASSERT_TRUE(raft::devArrMatch( coef_ref.data(), coef.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef2_ref.data(), coef2.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef3_ref.data(), coef3.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); rmm::device_uvector<double> means_1(params.n_col, stream); rmm::device_uvector<double> means_2(params.n_col, stream); rmm::device_uvector<double> vars_1(params.n_col, stream); rmm::device_uvector<double> vars_2(params.n_col, stream); raft::stats::mean(means_1.data(), data.data(), params.n_col, params.n_row, false, false, stream); raft::stats::vars( vars_1.data(), data.data(), means_1.data(), params.n_col, params.n_row, false, false, stream); raft::stats::meanvar( means_2.data(), vars_2.data(), data.data(), params.n_col, params.n_row, false, false, stream); ASSERT_TRUE(raft::devArrMatch( means_1.data(), means_2.data(), params.n_col, raft::CompareApprox<double>(0.0001))); ASSERT_TRUE(raft::devArrMatch( vars_1.data(), vars_2.data(), params.n_col, raft::CompareApprox<double>(0.0001))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(CdTests, CdTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(CdTests, CdTestD, ::testing::ValuesIn(inputsd2)); } // namespace Solver } // end namespace ML
cfe4035054f7713367205f884649a1b543cb9cae.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/matrix/matrix.hpp> #include <rmm/device_uvector.hpp> #include <solver/cd.cuh> #include <test_utils.h> #include <raft/stats/mean.hpp> #include <raft/stats/meanvar.hpp> #include <raft/stats/stddev.hpp> namespace ML { namespace Solver { using namespace MLCommon; template <typename T> struct CdInputs { T tol; int n_row; int n_col; }; template <typename T> class CdTest : public ::testing::TestWithParam<CdInputs<T>> { public: CdTest() : params(::testing::TestWithParam<CdInputs<T>>::GetParam()), stream(handle.get_stream()), data(params.n_row * params.n_col, stream), labels(params.n_row, stream), coef(params.n_col, stream), coef2(params.n_col, stream), coef3(params.n_col, stream), coef4(params.n_col, stream), coef_ref(params.n_col, stream), coef2_ref(params.n_col, stream), coef3_ref(params.n_col, stream), coef4_ref(params.n_col, stream) { RAFT_CUDA_TRY(cudaMemsetAsync(coef.data(), 0, coef.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef2.data(), 0, coef2.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef3.data(), 0, coef3.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef4.data(), 0, coef4.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef_ref.data(), 0, coef_ref.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef2_ref.data(), 0, coef2_ref.size() * 
sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef3_ref.data(), 0, coef3_ref.size() * sizeof(T), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(coef4_ref.data(), 0, coef4_ref.size() * sizeof(T), stream)); } protected: void lasso() { int len = params.n_row * params.n_col; T data_h[len] = {1.0, 1.2, 2.0, 2.0, 4.5, 2.0, 2.0, 3.0}; raft::update_device(data.data(), data_h, len, stream); T labels_h[params.n_row] = {6.0, 8.3, 9.8, 11.2}; raft::update_device(labels.data(), labels_h, params.n_row, stream); /* How to reproduce the coefficients for this test: from sklearn.preprocessing import StandardScaler scaler = StandardScaler(with_mean=True, with_std=True) x_norm = scaler.fit_transform(data_h) m = ElasticNet(fit_intercept=, normalize=, alpha=, l1_ratio=) m.fit(x_norm, y) print(m.coef_ / scaler.scale_ if normalize else m.coef_) */ T coef_ref_h[params.n_col] = {4.90832, 0.35031}; raft::update_device(coef_ref.data(), coef_ref_h, params.n_col, stream); T coef2_ref_h[params.n_col] = {2.53530, -0.36832}; raft::update_device(coef2_ref.data(), coef2_ref_h, params.n_col, stream); T coef3_ref_h[params.n_col] = {2.932841, 1.15248}; raft::update_device(coef3_ref.data(), coef3_ref_h, params.n_col, stream); T coef4_ref_h[params.n_col] = {1.75420431, -0.16215289}; raft::update_device(coef4_ref.data(), coef4_ref_h, params.n_col, stream); bool fit_intercept = false; bool normalize = false; int epochs = 200; T alpha = T(0.2); T l1_ratio = T(1.0); bool shuffle = false; T tol = T(1e-4); ML::loss_funct loss = ML::loss_funct::SQRD_LOSS; intercept = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef.data(), &intercept, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); fit_intercept = true; intercept2 = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef2.data(), &intercept2, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); alpha = T(1.0); l1_ratio = T(0.5); fit_intercept = 
false; intercept = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef3.data(), &intercept, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); fit_intercept = true; normalize = true; intercept2 = T(0); cdFit(handle, data.data(), params.n_row, params.n_col, labels.data(), coef4.data(), &intercept2, fit_intercept, normalize, epochs, loss, alpha, l1_ratio, shuffle, tol, stream); } void SetUp() override { lasso(); } protected: CdInputs<T> params; raft::handle_t handle; cudaStream_t stream = 0; rmm::device_uvector<T> data, labels, coef, coef_ref; rmm::device_uvector<T> coef2, coef2_ref; rmm::device_uvector<T> coef3, coef3_ref; rmm::device_uvector<T> coef4, coef4_ref; T intercept, intercept2; }; const std::vector<CdInputs<float>> inputsf2 = {{0.01f, 4, 2}}; const std::vector<CdInputs<double>> inputsd2 = {{0.01, 4, 2}}; typedef CdTest<float> CdTestF; TEST_P(CdTestF, Fit) { ASSERT_TRUE(raft::devArrMatch( coef_ref.data(), coef.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef2_ref.data(), coef2.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef3_ref.data(), coef3.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); rmm::device_uvector<float> means_1(params.n_col, stream); rmm::device_uvector<float> means_2(params.n_col, stream); rmm::device_uvector<float> vars_1(params.n_col, stream); rmm::device_uvector<float> vars_2(params.n_col, stream); raft::stats::mean(means_1.data(), data.data(), params.n_col, params.n_row, false, false, stream); raft::stats::vars( vars_1.data(), data.data(), means_1.data(), params.n_col, params.n_row, false, false, stream); raft::stats::meanvar( means_2.data(), vars_2.data(), data.data(), params.n_col, params.n_row, false, false, stream); ASSERT_TRUE(raft::devArrMatch( means_1.data(), means_2.data(), params.n_col, raft::CompareApprox<float>(0.0001))); 
ASSERT_TRUE(raft::devArrMatch( vars_1.data(), vars_2.data(), params.n_col, raft::CompareApprox<float>(0.0001))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<float>(params.tol))); } typedef CdTest<double> CdTestD; TEST_P(CdTestD, Fit) { ASSERT_TRUE(raft::devArrMatch( coef_ref.data(), coef.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef2_ref.data(), coef2.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); ASSERT_TRUE(raft::devArrMatch( coef3_ref.data(), coef3.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); rmm::device_uvector<double> means_1(params.n_col, stream); rmm::device_uvector<double> means_2(params.n_col, stream); rmm::device_uvector<double> vars_1(params.n_col, stream); rmm::device_uvector<double> vars_2(params.n_col, stream); raft::stats::mean(means_1.data(), data.data(), params.n_col, params.n_row, false, false, stream); raft::stats::vars( vars_1.data(), data.data(), means_1.data(), params.n_col, params.n_row, false, false, stream); raft::stats::meanvar( means_2.data(), vars_2.data(), data.data(), params.n_col, params.n_row, false, false, stream); ASSERT_TRUE(raft::devArrMatch( means_1.data(), means_2.data(), params.n_col, raft::CompareApprox<double>(0.0001))); ASSERT_TRUE(raft::devArrMatch( vars_1.data(), vars_2.data(), params.n_col, raft::CompareApprox<double>(0.0001))); ASSERT_TRUE(raft::devArrMatch( coef4_ref.data(), coef4.data(), params.n_col, raft::CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(CdTests, CdTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(CdTests, CdTestD, ::testing::ValuesIn(inputsd2)); } // namespace Solver } // end namespace ML
6311df8363561b1c80a55e30315810cd9c060bcf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define N (2048*2048) #define N_THREADS_PER_BLOCK 512 // Adapt vector addition to use both blocks and threads __global__ void addByCombine(int *a, int *b, int *c) { // use the built-in variable blockDim.x for threads per block int index = threadIdx.x + blockIdx.x * blockDim.x; c[index] = a[index] + b[index]; } void random_ints(int* x, int size) { int i; for (i = 0; i<size; i++) { x[i] = rand() % 10; } } int main() { int *host_a, *host_b, *host_c; // host copies of host_a, host_b, host_c int *device_a, *device_b, *device_c; // device copies of device_a, device_b, device_c int size = N * sizeof(int); // Alloc space for device copies of device_a, device_b, device_c hipMalloc(&device_a, size); hipMalloc(&device_b, size); hipMalloc(&device_c, size); // Alloc space for host copies of host_a, host_b, host_c // and setup input values host_a = (int*)malloc(size); random_ints(host_a, N); host_b = (int*)malloc(size); random_ints(host_b, N); host_c = (int*)malloc(size); // Copy input to device hipMemcpy(device_a, host_a, size, hipMemcpyHostToDevice); hipMemcpy(device_b, host_b, size, hipMemcpyHostToDevice); // Launch addByCombine() kernel on GPU addByCombine << <N/ N_THREADS_PER_BLOCK, N_THREADS_PER_BLOCK >> > (device_a, device_b, device_c); // Copy result back to host hipMemcpy(host_c, device_c, size, hipMemcpyDeviceToHost); for (int i = 0; i<N; i++) { printf("host_a[%d]=%d , host_b[%d]=%d, host_c[%d]=%d\n", i, host_a[i], i, host_b[i], i, host_c[i]); } // Cleanup free(host_a); free(host_b); free(host_c); hipFree(device_a); hipFree(device_b); hipFree(device_c); return 0; }
6311df8363561b1c80a55e30315810cd9c060bcf.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define N (2048*2048) #define N_THREADS_PER_BLOCK 512 // Adapt vector addition to use both blocks and threads __global__ void addByCombine(int *a, int *b, int *c) { // use the built-in variable blockDim.x for threads per block int index = threadIdx.x + blockIdx.x * blockDim.x; c[index] = a[index] + b[index]; } void random_ints(int* x, int size) { int i; for (i = 0; i<size; i++) { x[i] = rand() % 10; } } int main() { int *host_a, *host_b, *host_c; // host copies of host_a, host_b, host_c int *device_a, *device_b, *device_c; // device copies of device_a, device_b, device_c int size = N * sizeof(int); // Alloc space for device copies of device_a, device_b, device_c cudaMalloc(&device_a, size); cudaMalloc(&device_b, size); cudaMalloc(&device_c, size); // Alloc space for host copies of host_a, host_b, host_c // and setup input values host_a = (int*)malloc(size); random_ints(host_a, N); host_b = (int*)malloc(size); random_ints(host_b, N); host_c = (int*)malloc(size); // Copy input to device cudaMemcpy(device_a, host_a, size, cudaMemcpyHostToDevice); cudaMemcpy(device_b, host_b, size, cudaMemcpyHostToDevice); // Launch addByCombine() kernel on GPU addByCombine << <N/ N_THREADS_PER_BLOCK, N_THREADS_PER_BLOCK >> > (device_a, device_b, device_c); // Copy result back to host cudaMemcpy(host_c, device_c, size, cudaMemcpyDeviceToHost); for (int i = 0; i<N; i++) { printf("host_a[%d]=%d , host_b[%d]=%d, host_c[%d]=%d\n", i, host_a[i], i, host_b[i], i, host_c[i]); } // Cleanup free(host_a); free(host_b); free(host_c); cudaFree(device_a); cudaFree(device_b); cudaFree(device_c); return 0; }
5d4e35b6fbacb86397a6e116a6e89863b2ab0f72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <helper_math.h> #include <helper_functions.h> #include <helper_cuda.h> // CUDA device initialization helper functions #define SCATTER #define SPEED 10 //#define SLOW_MATH #ifdef SLOW_MATH #include "../../../include/cuda_math.cuh" #else //my approximate math lib #include "../../../include/fast_math.cuh" #endif #include "newhalf.hpp" #include <hip/hip_fp16.h> __constant__ float cGaussian[64]; //gaussian array in device side __constant__ half2 cGaussian_half[64]; //gaussian array in device side half2 type texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex; uint *dImage = NULL; //original image uint *dTemp = NULL; //temp array for iterations size_t pitch; /* Perform a simple bilateral filter. Bilateral filter is a nonlinear filter that is a mixture of range filter and domain filter, the previous one preserves crisp edges and the latter one filters noise. The intensity value at each pixel in an image is replaced by a weighted average of intensity values from nearby pixels. The weight factor is calculated by the product of domain filter component(using the gaussian distribution as a spatial distance) as well as range filter component(Euclidean distance between center pixel and the current neighbor pixel). Because this process is nonlinear, the sample just uses a simple pixel by pixel step. Texture fetches automatically clamp to edge of image. 1D gaussian array is mapped to a 1D texture instead of using shared memory, which may cause severe bank conflict. 
Threads are y-pass(column-pass), because the output is coalesced. Parameters od - pointer to output data in global memory d_f - pointer to the 1D gaussian array e_d - euclidean delta w - image width h - image height r - filter radius */ //Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2) __device__ float euclideanLen(float4 a, float4 b, float d) { float mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return __expf(-mod / (2.f * d * d)); } __device__ half2 euclideanLen_half(half2_4 a, half2_4 b, half2 d) { half2 mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return fast_h2exp(-mod * fast_h2rcp(__float2half2_rn(2.f) * d * d)); } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0] rgba.y = __saturatef(fabs(rgba.y)); rgba.z = __saturatef(fabs(rgba.z)); rgba.w = __saturatef(fabs(rgba.w)); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } //column pass using coalesced global memory reads __global__ void d_bilateral_filter(uint *od, int w, int h, float e_d, int r) { //int block_idx = blockIdx.x*gridDim.x + blockIdx.y; #ifdef SCATTER if(blockIdx.x %10 < SPEED){ //%100 if need more resolution, this program 40 blocks so %10 is good #else if(blockIdx.x < SPEED){ #endif if (threadIdx.y >= blockDim.y /2) return; int x = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + 2*threadIdx.y; int y2 = blockIdx.y*blockDim.y + 2*threadIdx.y+1; if (x >= w || y2 >= h) { return; } half2 e_d_half = __float2half2_rn(e_d); half2 sum = __float2half2_rn(0.0f); half2 factor; 
half2_4 t = {__float2half2_rn(0.f), __float2half2_rn(0.f), __float2half2_rn(0.f), __float2half2_rn(0.f)}; float4 center_temp1 = tex2D(rgbaTex, x, y1); float4 center_temp2 = tex2D(rgbaTex, x, y2); half2_4 center; center.x = __floats2half2_rn(center_temp1.x, center_temp2.x); center.y = __floats2half2_rn(center_temp1.y, center_temp2.y); center.z = __floats2half2_rn(center_temp1.z, center_temp2.z); //euclideanLen_half uses only x,y,z; no need to convert w; for (int i = -r; i <= r; i++) { for (int j = -r; j <= r; j++) { float4 curPix1 = tex2D(rgbaTex, x + j, y1 + i); float4 curPix2 = tex2D(rgbaTex, x + j, y2 + i); half2_4 curPix; curPix.x = __floats2half2_rn(curPix1.x, curPix2.x); curPix.y = __floats2half2_rn(curPix1.y, curPix2.y); curPix.z = __floats2half2_rn(curPix1.z, curPix2.z); curPix.w = __floats2half2_rn(curPix1.w, curPix2.w); factor = cGaussian_half[i + r] * cGaussian_half[j + r] * //domain factor euclideanLen_half(curPix, center, e_d_half); //range factor t.x += factor * curPix.x; t.y += factor * curPix.y; t.z += factor * curPix.z; t.w += factor * curPix.w; sum += factor; } } float4 res1, res2; half2_4 res_half; half2 rcp_sum = fast_h2rcp(sum); res_half.x = t.x*rcp_sum; res_half.y = t.y*rcp_sum; res_half.z = t.z*rcp_sum; res_half.w = t.w*rcp_sum; float2 temp = __half22float2(res_half.x); res1.x = temp.x; res2.x = temp.y; temp = __half22float2(res_half.y); res1.y = temp.x; res2.y = temp.y; temp = __half22float2(res_half.z); res1.z = temp.x; res2.z = temp.y; temp = __half22float2(res_half.w); res1.w = temp.x; res2.w = temp.y; od[y1 * w + x] = rgbaFloatToInt(res1); od[y2 * w + x] = rgbaFloatToInt(res2); } else { //doing float int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(rgbaTex, x, y); for (int i = -r; i <= r; i++) { for (int j = -r; j <= r; j++) { float4 curPix = tex2D(rgbaTex, x + j, y + i); factor 
= cGaussian[i + r] * cGaussian[j + r] * //domain factor euclideanLen(curPix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } } extern "C" void initTexture(int width, int height, uint *hImage) { // copy image data to array checkCudaErrors(hipMallocPitch(&dImage, &pitch, sizeof(uint)*width, height)); checkCudaErrors(hipMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height)); checkCudaErrors(hipMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width, sizeof(uint)*width, height, hipMemcpyHostToDevice)); } extern "C" void freeTextures() { checkCudaErrors(hipFree(dImage)); checkCudaErrors(hipFree(dTemp)); } /* Because a 2D gaussian mask is symmetry in row and column, here only generate a 1D mask, and use the product by row and column index later. 1D gaussian distribution : g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier parameters: og - output gaussian array in global memory delta - the 2nd parameter 'd' in the above function radius - half of the filter size (total filter size = 2 * radius + 1) */ extern "C" void updateGaussian(float delta, int radius) { float fGaussian[64]; uint32_t half2Gaussian[64]; for (int i = 0; i < 2*radius + 1; ++i) { float x = i-radius; fGaussian[i] = expf(-(x*x) / (2*delta*delta)); half2Gaussian[i] = floats2half2 (fGaussian[i],fGaussian[i]); } //uint32_t floats2half2 (float val_high, float val_low ) checkCudaErrors(hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1))); checkCudaErrors(hipMemcpyToSymbol(cGaussian_half, half2Gaussian, sizeof(uint32_t)*(2*radius+1))); } /* Perform 2D bilateral filter on image using CUDA Parameters: d_dest - pointer to destination image in device memory width - image width height - image height e_d - euclidean delta radius - filter radius iterations - number of iterations */ // RGBA version extern "C" double bilateralFilterRGBA(uint *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer) { // 
var for kernel computation timing double dKernelTime; // Bind the array to the texture hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); checkCudaErrors(hipBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch)); for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); hipLaunchKernelGGL(( d_bilateral_filter), dim3(gridSize), dim3(blockSize), 0, 0, dDest, width, height, e_d, radius); // sync host and stop computation timer checkCudaErrors(hipDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(hipMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, hipMemcpyDeviceToDevice)); checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); }
5d4e35b6fbacb86397a6e116a6e89863b2ab0f72.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <helper_math.h> #include <helper_functions.h> #include <helper_cuda.h> // CUDA device initialization helper functions #define SCATTER #define SPEED 10 //#define SLOW_MATH #ifdef SLOW_MATH #include "../../../include/cuda_math.cuh" #else //my approximate math lib #include "../../../include/fast_math.cuh" #endif #include "newhalf.hpp" #include <cuda_fp16.h> __constant__ float cGaussian[64]; //gaussian array in device side __constant__ half2 cGaussian_half[64]; //gaussian array in device side half2 type texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex; uint *dImage = NULL; //original image uint *dTemp = NULL; //temp array for iterations size_t pitch; /* Perform a simple bilateral filter. Bilateral filter is a nonlinear filter that is a mixture of range filter and domain filter, the previous one preserves crisp edges and the latter one filters noise. The intensity value at each pixel in an image is replaced by a weighted average of intensity values from nearby pixels. The weight factor is calculated by the product of domain filter component(using the gaussian distribution as a spatial distance) as well as range filter component(Euclidean distance between center pixel and the current neighbor pixel). Because this process is nonlinear, the sample just uses a simple pixel by pixel step. Texture fetches automatically clamp to edge of image. 1D gaussian array is mapped to a 1D texture instead of using shared memory, which may cause severe bank conflict. Threads are y-pass(column-pass), because the output is coalesced. 
Parameters od - pointer to output data in global memory d_f - pointer to the 1D gaussian array e_d - euclidean delta w - image width h - image height r - filter radius */ //Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2) __device__ float euclideanLen(float4 a, float4 b, float d) { float mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return __expf(-mod / (2.f * d * d)); } __device__ half2 euclideanLen_half(half2_4 a, half2_4 b, half2 d) { half2 mod = (b.x - a.x) * (b.x - a.x) + (b.y - a.y) * (b.y - a.y) + (b.z - a.z) * (b.z - a.z); return fast_h2exp(-mod * fast_h2rcp(__float2half2_rn(2.f) * d * d)); } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0] rgba.y = __saturatef(fabs(rgba.y)); rgba.z = __saturatef(fabs(rgba.z)); rgba.w = __saturatef(fabs(rgba.w)); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } //column pass using coalesced global memory reads __global__ void d_bilateral_filter(uint *od, int w, int h, float e_d, int r) { //int block_idx = blockIdx.x*gridDim.x + blockIdx.y; #ifdef SCATTER if(blockIdx.x %10 < SPEED){ //%100 if need more resolution, this program 40 blocks so %10 is good #else if(blockIdx.x < SPEED){ #endif if (threadIdx.y >= blockDim.y /2) return; int x = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + 2*threadIdx.y; int y2 = blockIdx.y*blockDim.y + 2*threadIdx.y+1; if (x >= w || y2 >= h) { return; } half2 e_d_half = __float2half2_rn(e_d); half2 sum = __float2half2_rn(0.0f); half2 factor; half2_4 t = {__float2half2_rn(0.f), __float2half2_rn(0.f), 
__float2half2_rn(0.f), __float2half2_rn(0.f)}; float4 center_temp1 = tex2D(rgbaTex, x, y1); float4 center_temp2 = tex2D(rgbaTex, x, y2); half2_4 center; center.x = __floats2half2_rn(center_temp1.x, center_temp2.x); center.y = __floats2half2_rn(center_temp1.y, center_temp2.y); center.z = __floats2half2_rn(center_temp1.z, center_temp2.z); //euclideanLen_half uses only x,y,z; no need to convert w; for (int i = -r; i <= r; i++) { for (int j = -r; j <= r; j++) { float4 curPix1 = tex2D(rgbaTex, x + j, y1 + i); float4 curPix2 = tex2D(rgbaTex, x + j, y2 + i); half2_4 curPix; curPix.x = __floats2half2_rn(curPix1.x, curPix2.x); curPix.y = __floats2half2_rn(curPix1.y, curPix2.y); curPix.z = __floats2half2_rn(curPix1.z, curPix2.z); curPix.w = __floats2half2_rn(curPix1.w, curPix2.w); factor = cGaussian_half[i + r] * cGaussian_half[j + r] * //domain factor euclideanLen_half(curPix, center, e_d_half); //range factor t.x += factor * curPix.x; t.y += factor * curPix.y; t.z += factor * curPix.z; t.w += factor * curPix.w; sum += factor; } } float4 res1, res2; half2_4 res_half; half2 rcp_sum = fast_h2rcp(sum); res_half.x = t.x*rcp_sum; res_half.y = t.y*rcp_sum; res_half.z = t.z*rcp_sum; res_half.w = t.w*rcp_sum; float2 temp = __half22float2(res_half.x); res1.x = temp.x; res2.x = temp.y; temp = __half22float2(res_half.y); res1.y = temp.x; res2.y = temp.y; temp = __half22float2(res_half.z); res1.z = temp.x; res2.z = temp.y; temp = __half22float2(res_half.w); res1.w = temp.x; res2.w = temp.y; od[y1 * w + x] = rgbaFloatToInt(res1); od[y2 * w + x] = rgbaFloatToInt(res2); } else { //doing float int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= w || y >= h) { return; } float sum = 0.0f; float factor; float4 t = {0.f, 0.f, 0.f, 0.f}; float4 center = tex2D(rgbaTex, x, y); for (int i = -r; i <= r; i++) { for (int j = -r; j <= r; j++) { float4 curPix = tex2D(rgbaTex, x + j, y + i); factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor 
euclideanLen(curPix, center, e_d); //range factor t += factor * curPix; sum += factor; } } od[y * w + x] = rgbaFloatToInt(t/sum); } } extern "C" void initTexture(int width, int height, uint *hImage) { // copy image data to array checkCudaErrors(cudaMallocPitch(&dImage, &pitch, sizeof(uint)*width, height)); checkCudaErrors(cudaMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height)); checkCudaErrors(cudaMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width, sizeof(uint)*width, height, cudaMemcpyHostToDevice)); } extern "C" void freeTextures() { checkCudaErrors(cudaFree(dImage)); checkCudaErrors(cudaFree(dTemp)); } /* Because a 2D gaussian mask is symmetry in row and column, here only generate a 1D mask, and use the product by row and column index later. 1D gaussian distribution : g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier parameters: og - output gaussian array in global memory delta - the 2nd parameter 'd' in the above function radius - half of the filter size (total filter size = 2 * radius + 1) */ extern "C" void updateGaussian(float delta, int radius) { float fGaussian[64]; uint32_t half2Gaussian[64]; for (int i = 0; i < 2*radius + 1; ++i) { float x = i-radius; fGaussian[i] = expf(-(x*x) / (2*delta*delta)); half2Gaussian[i] = floats2half2 (fGaussian[i],fGaussian[i]); } //uint32_t floats2half2 (float val_high, float val_low ) checkCudaErrors(cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1))); checkCudaErrors(cudaMemcpyToSymbol(cGaussian_half, half2Gaussian, sizeof(uint32_t)*(2*radius+1))); } /* Perform 2D bilateral filter on image using CUDA Parameters: d_dest - pointer to destination image in device memory width - image width height - image height e_d - euclidean delta radius - filter radius iterations - number of iterations */ // RGBA version extern "C" double bilateralFilterRGBA(uint *dDest, int width, int height, float e_d, int radius, int iterations, StopWatchInterface *timer) { // var for kernel computation timing double 
dKernelTime; // Bind the array to the texture cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch)); for (int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&timer); dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16); dim3 blockSize(16, 16); d_bilateral_filter<<< gridSize, blockSize>>>( dDest, width, height, e_d, radius); // sync host and stop computation timer checkCudaErrors(cudaDeviceSynchronize()); dKernelTime += sdkGetTimerValue(&timer); if (iterations > 1) { // copy result back from global memory to array checkCudaErrors(cudaMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width, sizeof(int)*width, height, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch)); } } return ((dKernelTime/1000.)/(double)iterations); }
9da1d92f9516ae862bf204e3c8e14e036453b04f.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_blas_interface.h" #include "oneflow/core/device/cuda_util.h" namespace oneflow { namespace { hipblasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) { hipblasOperation_t cublas_trans{}; if (trans == CBLAS_TRANSPOSE::CblasNoTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_N; } else if (trans == CBLAS_TRANSPOSE::CblasTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_T; } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_C; } else { UNIMPLEMENTED(); // do nothing } return cublas_trans; } std::tuple<int, int, int, hipblasOperation_t, hipblasOperation_t> PrepareToCallCublasGemm( enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k) { int lda = (trans_a == CblasNoTrans) ? k : m; int ldb = (trans_b == CblasNoTrans) ? 
n : k; int ldc = n; hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER /*order*/, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int lda = 0; int ldb = 0; int ldc = 0; hipblasOperation_t cublas_trans_a{}; hipblasOperation_t cublas_trans_b{}; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); cublas_gemm<T>(ctx->cublas_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, a, lda, &beta_val, c, ldc); } template<> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER /*order*/, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const half* a, const half* b, const double beta, half* c) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); int lda = 0; int ldb = 0; int ldc = 0; hipblasOperation_t cublas_trans_a{}; hipblasOperation_t cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); #if TORCH_HIP_VERSION < 11000 CublasMathModeGuard guard(ctx->cublas_handle(), CUBLAS_TENSOR_OP_MATH); #else CublasMathModeGuard guard(ctx->cublas_handle(), CUBLAS_DEFAULT_MATH); #endif // TORCH_HIP_VERSION < 11000 if (GetCudaSmVersion() >= 500) { OF_CUBLAS_CHECK(hipblasGemmEx(ctx->cublas_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, HIP_R_16F, ldb, a, HIP_R_16F, lda, &beta_f, c, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); } else { 
OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, HIP_R_16F, ldb, a, HIP_R_16F, lda, &beta_f, c, HIP_R_16F, ldc)); } } #define CUDA_DATA_TYPE_SEQ \ OF_PP_MAKE_TUPLE_SEQ(float, HIP_R_32F) \ OF_PP_MAKE_TUPLE_SEQ(double, HIP_R_64F) \ OF_PP_MAKE_TUPLE_SEQ(float16, HIP_R_16F) template<typename T> struct CudaDataType; #define SPECIALIZE_CUDA_DATA_TYPE(type_cpp, type_cuda) \ template<> \ struct CudaDataType<type_cpp> : std::integral_constant<hipDataType, type_cuda> {}; OF_PP_FOR_EACH_TUPLE(SPECIALIZE_CUDA_DATA_TYPE, CUDA_DATA_TYPE_SEQ); #undef SPECIALIZE_CUDA_DATA_TYPE } // namespace void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } } // namespace oneflow
9da1d92f9516ae862bf204e3c8e14e036453b04f.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_blas_interface.h" #include "oneflow/core/device/cuda_util.h" namespace oneflow { namespace { cublasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) { cublasOperation_t cublas_trans{}; if (trans == CBLAS_TRANSPOSE::CblasNoTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_N; } else if (trans == CBLAS_TRANSPOSE::CblasTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_T; } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_C; } else { UNIMPLEMENTED(); // do nothing } return cublas_trans; } std::tuple<int, int, int, cublasOperation_t, cublasOperation_t> PrepareToCallCublasGemm( enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k) { int lda = (trans_a == CblasNoTrans) ? k : m; int ldb = (trans_b == CblasNoTrans) ? 
n : k; int ldc = n; cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER /*order*/, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int lda = 0; int ldb = 0; int ldc = 0; cublasOperation_t cublas_trans_a{}; cublasOperation_t cublas_trans_b{}; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); cublas_gemm<T>(ctx->cublas_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, a, lda, &beta_val, c, ldc); } template<> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER /*order*/, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const half* a, const half* b, const double beta, half* c) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); int lda = 0; int ldb = 0; int ldc = 0; cublasOperation_t cublas_trans_a{}; cublasOperation_t cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); #if CUDA_VERSION < 11000 CublasMathModeGuard guard(ctx->cublas_handle(), CUBLAS_TENSOR_OP_MATH); #else CublasMathModeGuard guard(ctx->cublas_handle(), CUBLAS_DEFAULT_MATH); #endif // CUDA_VERSION < 11000 if (GetCudaSmVersion() >= 500) { OF_CUBLAS_CHECK(cublasGemmEx(ctx->cublas_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_f, c, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); } else { OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_handle(), 
cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_f, c, CUDA_R_16F, ldc)); } } #define CUDA_DATA_TYPE_SEQ \ OF_PP_MAKE_TUPLE_SEQ(float, CUDA_R_32F) \ OF_PP_MAKE_TUPLE_SEQ(double, CUDA_R_64F) \ OF_PP_MAKE_TUPLE_SEQ(float16, CUDA_R_16F) template<typename T> struct CudaDataType; #define SPECIALIZE_CUDA_DATA_TYPE(type_cpp, type_cuda) \ template<> \ struct CudaDataType<type_cpp> : std::integral_constant<cudaDataType_t, type_cuda> {}; OF_PP_FOR_EACH_TUPLE(SPECIALIZE_CUDA_DATA_TYPE, CUDA_DATA_TYPE_SEQ); #undef SPECIALIZE_CUDA_DATA_TYPE } // namespace void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } } // namespace oneflow
bc314acdef59767bd1403ce74307cf39cc84c038.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <sys/time.h> #include <hip/hip_cooperative_groups.h> #include <helper_functions.h> #include <helper_cuda.h> #include <assert.h> #include <string.h> #include "./files/msnbc_thread32_gpu_cuda_3.cu" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } //#include <helper_cuda.h> /** * CUDA Kernel Device code */ __global__ void main_ac(float *A, int *B, int *C, bool *Op, int nIter) { ac(A, B, C, Op, nIter); } int main(int argc, char **argv) { // nIter int nIter = getCmdLineArgumentInt(argc, (const char **)argv, "nIter"); //char *temp = NULL; //getCmdLineArgumentString(argc, (const char **) argv, "net", // &temp); //if (NULL != temp) { //} //else { // exit(1); //} size_t size_a= sizeof(float)* SIZE_OF_IN; size_t size_b= sizeof(int) * SIZE_OF_AC; size_t size_c= sizeof(int) * SIZE_OF_AC; size_t size_op= sizeof(bool) * SIZE_OF_AC; // Allocate the device input vector A float *d_A = NULL; gpuErrchk(hipMalloc((void **)&d_A, size_a)); int *d_B = NULL; gpuErrchk( hipMalloc((void **)&d_B, size_b)); int *d_C = NULL; gpuErrchk( hipMalloc((void **)&d_C, size_c)); bool *d_Op = NULL; gpuErrchk( hipMalloc((void **)&d_Op, size_op)); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); gpuErrchk(hipMemcpy(d_A, h_A, size_a, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_B, h_B, size_b, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_C, h_C, size_c, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_Op, h_Op, size_op, hipMemcpyHostToDevice)); // Launch the Vector Add CUDA Kernel int 
threadsPerBlock = THREADS_PER_BLOCK; int blocksPerGrid= BLOCKS_PER_GRID; struct timeval t1, t2; gettimeofday(&t1, 0); hipLaunchKernelGGL(( main_ac), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_Op, nIter); // FInish execution of kernel hipDeviceSynchronize(); gettimeofday(&t2, 0); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Time of kernel: %3.4f ms \n", time); printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); gpuErrchk(hipGetLastError()); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); gpuErrchk(hipMemcpy(h_A, d_A, size_a, hipMemcpyDeviceToHost)); for (int i=0; i< 4; i++) { printf("%d , %f | ", i, h_A[i]); } gpuErrchk(hipFree(d_A)); gpuErrchk(hipFree(d_B)); gpuErrchk(hipFree(d_C)); gpuErrchk(hipFree(d_Op)); //free(h_A); //free(h_B); //free(h_C); //free(h_Op); printf("Done!\n"); return 0; }
bc314acdef59767bd1403ce74307cf39cc84c038.cu
#include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <sys/time.h> #include <cooperative_groups.h> #include <helper_functions.h> #include <helper_cuda.h> #include <assert.h> #include <string.h> #include "./files/msnbc_thread32_gpu_cuda_3.cu" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //#include <helper_cuda.h> /** * CUDA Kernel Device code */ __global__ void main_ac(float *A, int *B, int *C, bool *Op, int nIter) { ac(A, B, C, Op, nIter); } int main(int argc, char **argv) { // nIter int nIter = getCmdLineArgumentInt(argc, (const char **)argv, "nIter"); //char *temp = NULL; //getCmdLineArgumentString(argc, (const char **) argv, "net", // &temp); //if (NULL != temp) { //} //else { // exit(1); //} size_t size_a= sizeof(float)* SIZE_OF_IN; size_t size_b= sizeof(int) * SIZE_OF_AC; size_t size_c= sizeof(int) * SIZE_OF_AC; size_t size_op= sizeof(bool) * SIZE_OF_AC; // Allocate the device input vector A float *d_A = NULL; gpuErrchk(cudaMalloc((void **)&d_A, size_a)); int *d_B = NULL; gpuErrchk( cudaMalloc((void **)&d_B, size_b)); int *d_C = NULL; gpuErrchk( cudaMalloc((void **)&d_C, size_c)); bool *d_Op = NULL; gpuErrchk( cudaMalloc((void **)&d_Op, size_op)); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); gpuErrchk(cudaMemcpy(d_A, h_A, size_a, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_B, h_B, size_b, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_C, h_C, size_c, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_Op, h_Op, size_op, cudaMemcpyHostToDevice)); // Launch the Vector Add CUDA Kernel int threadsPerBlock = THREADS_PER_BLOCK; int blocksPerGrid= 
BLOCKS_PER_GRID; struct timeval t1, t2; gettimeofday(&t1, 0); main_ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_Op, nIter); // FInish execution of kernel cudaDeviceSynchronize(); gettimeofday(&t2, 0); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Time of kernel: %3.4f ms \n", time); printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); gpuErrchk(cudaGetLastError()); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); gpuErrchk(cudaMemcpy(h_A, d_A, size_a, cudaMemcpyDeviceToHost)); for (int i=0; i< 4; i++) { printf("%d , %f | ", i, h_A[i]); } gpuErrchk(cudaFree(d_A)); gpuErrchk(cudaFree(d_B)); gpuErrchk(cudaFree(d_C)); gpuErrchk(cudaFree(d_Op)); //free(h_A); //free(h_B); //free(h_C); //free(h_Op); printf("Done!\n"); return 0; }
7a3376bd36061890989702133a471bda1bd8ea00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_new_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { /* #define CUDA_KERNEL_LOOP(i,n) \ for(int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); i +=blockDim.x * gridDim.x) */ struct pair_ { float data; int index; }; template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int top_instances, const int bottom_instances, Dtype* const top_data, int* mask, struct pair_* pairdata, const bool aver_flag) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph; int wstart = pw; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int count = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; int* mask_slice = mask + (n * channels + c) * (top_instances + bottom_instances) * pooled_width; struct pair_* pairdata_slice = pairdata + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pairdata_slice[count].data = static_cast<float>(bottom_slice[h * width + w]); pairdata_slice[count].index = h * width + w; count++; } } //sort the pair data for (int i = 0; i < count - 1; ++i) { for (int j = 0; j < count - 1 - i; ++j) { if (pairdata_slice[j].data > pairdata_slice[j+1].data) { struct pair_ temp = pairdata_slice[j]; pairdata_slice[j] = pairdata_slice[j+1]; 
pairdata_slice[j+1] = temp; } } } int top_index = count - 1; int max_min = 0; int top_instances_t = top_instances; Dtype top_values = Dtype(0.); while (top_instances_t > 0) { top_values += Dtype(pairdata_slice[top_index].data); mask_slice[max_min] = pairdata_slice[top_index].index; top_index--; top_instances_t--; max_min++; } int bottom_index = 0; int bottom_instances_t = bottom_instances; Dtype bottom_values = Dtype(0.); while (bottom_instances_t > 0) { bottom_values += Dtype(pairdata_slice[bottom_index].data); mask_slice[max_min] = pairdata_slice[bottom_index].index; bottom_index++; bottom_instances_t--; max_min++; } if (aver_flag) { top_data[index] = (top_values + bottom_values)/(top_instances + bottom_instances); } else { top_data[index] = top_values + bottom_values; } } } template <typename Dtype> __global__ void gpu_set(const int nthreads, struct pair_* pairdata) { CUDA_KERNEL_LOOP(index, nthreads) { pairdata[index].data = 0.; pairdata[index].index = 0; } } template <typename Dtype> void PoolingNewLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; mask = max_idx_.mutable_gpu_data(); caffe_gpu_set(count, Dtype(0.), top_data); caffe_gpu_set(max_idx_.count(), -1, mask); int count_bottom = bottom[0]->count(); struct pair_* pairdata; hipMallocManaged((void **)&pairdata, count_bottom * sizeof(struct pair_)); hipLaunchKernelGGL(( gpu_set<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count_bottom, pairdata); hipDeviceSynchronize(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, top_instances_, bottom_instances_, top_data, 
mask, pairdata, aver_flag_); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; hipFree(pairdata); } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width,const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int top_instances, const int bottom_instances, Dtype* const bottom_diff, const bool aver_flag) { CUDA_KERNEL_LOOP(index, nthreads) { //const int w = index % width; //const int h = (index / width) % height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int offset1 = (n * channels + c) * (top_instances + bottom_instances) * pooled_width; const int offset2 = (n * channels + c) * height * width; const int* const mask_slice = mask + offset1; Dtype* const bottom_diff_slice = bottom_diff + offset2; for (int min_max = 0; min_max < top_instances; ++min_max) { const int bottom_diff_index = mask_slice[min_max]; if (aver_flag) { bottom_diff_slice[bottom_diff_index] = top_diff[index]/(top_instances + bottom_instances); } else { bottom_diff_slice[bottom_diff_index] = top_diff[index]; } } } } template <typename Dtype> void PoolingNewLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff); const int count = top[0]->count(); const int* mask = NULL; mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, 
top_instances_, bottom_instances_, bottom_diff, aver_flag_); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingNewLayer); } // namespace caffe
7a3376bd36061890989702133a471bda1bd8ea00.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_new_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { /* #define CUDA_KERNEL_LOOP(i,n) \ for(int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); i +=blockDim.x * gridDim.x) */ struct pair_ { float data; int index; }; template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int top_instances, const int bottom_instances, Dtype* const top_data, int* mask, struct pair_* pairdata, const bool aver_flag) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph; int wstart = pw; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int count = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; int* mask_slice = mask + (n * channels + c) * (top_instances + bottom_instances) * pooled_width; struct pair_* pairdata_slice = pairdata + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pairdata_slice[count].data = static_cast<float>(bottom_slice[h * width + w]); pairdata_slice[count].index = h * width + w; count++; } } //sort the pair data for (int i = 0; i < count - 1; ++i) { for (int j = 0; j < count - 1 - i; ++j) { if (pairdata_slice[j].data > pairdata_slice[j+1].data) { struct pair_ temp = pairdata_slice[j]; pairdata_slice[j] = pairdata_slice[j+1]; pairdata_slice[j+1] = temp; } } } int top_index = count - 1; int max_min = 0; int top_instances_t = 
top_instances; Dtype top_values = Dtype(0.); while (top_instances_t > 0) { top_values += Dtype(pairdata_slice[top_index].data); mask_slice[max_min] = pairdata_slice[top_index].index; top_index--; top_instances_t--; max_min++; } int bottom_index = 0; int bottom_instances_t = bottom_instances; Dtype bottom_values = Dtype(0.); while (bottom_instances_t > 0) { bottom_values += Dtype(pairdata_slice[bottom_index].data); mask_slice[max_min] = pairdata_slice[bottom_index].index; bottom_index++; bottom_instances_t--; max_min++; } if (aver_flag) { top_data[index] = (top_values + bottom_values)/(top_instances + bottom_instances); } else { top_data[index] = top_values + bottom_values; } } } template <typename Dtype> __global__ void gpu_set(const int nthreads, struct pair_* pairdata) { CUDA_KERNEL_LOOP(index, nthreads) { pairdata[index].data = 0.; pairdata[index].index = 0; } } template <typename Dtype> void PoolingNewLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int* mask = NULL; mask = max_idx_.mutable_gpu_data(); caffe_gpu_set(count, Dtype(0.), top_data); caffe_gpu_set(max_idx_.count(), -1, mask); int count_bottom = bottom[0]->count(); struct pair_* pairdata; cudaMallocManaged((void **)&pairdata, count_bottom * sizeof(struct pair_)); gpu_set<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(count_bottom, pairdata); cudaDeviceSynchronize(); // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, top_instances_, bottom_instances_, top_data, mask, pairdata, aver_flag_); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; cudaFree(pairdata); } template <typename Dtype> __global__ void MaxPoolBackward(const int 
nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width,const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int top_instances, const int bottom_instances, Dtype* const bottom_diff, const bool aver_flag) { CUDA_KERNEL_LOOP(index, nthreads) { //const int w = index % width; //const int h = (index / width) % height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int offset1 = (n * channels + c) * (top_instances + bottom_instances) * pooled_width; const int offset2 = (n * channels + c) * height * width; const int* const mask_slice = mask + offset1; Dtype* const bottom_diff_slice = bottom_diff + offset2; for (int min_max = 0; min_max < top_instances; ++min_max) { const int bottom_diff_index = mask_slice[min_max]; if (aver_flag) { bottom_diff_slice[bottom_diff_index] = top_diff[index]/(top_instances + bottom_instances); } else { bottom_diff_slice[bottom_diff_index] = top_diff[index]; } } } } template <typename Dtype> void PoolingNewLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff); const int count = top[0]->count(); const int* mask = NULL; mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, top_instances_, bottom_instances_, bottom_diff, aver_flag_); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingNewLayer); } // namespace caffe
41bb50e7c01cacddc0792a51cd8c8a37d8fe43a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 4400 $ // $Date: 2008-08-04 10:58:14 -0700 (Mon, 04 Aug 2008) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * rand_md5_app.cu * * @brief CUDPP application-level rand routine for MD5 */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> #include "cta/rand_cta.cu" #include "kernel/rand_kernel.cu" #define RAND_CTA_SIZE 128 //128 chosen, may be changed later /** \addtogroup cudpp_app * */ /** @name Rand Functions * @{ */ /**@brief Launches the MD5 Random number generator kernel * * The MD5 Random number generator works by generating 128 bit digests which * are then broken down into 32 bit chunks and stored inside \a d_out. * \a d_out is expected to be of type unsigned int and can hold \a numElements * elements. * * An analysis of the stastical distribution of the MD5 random number generator * can be found in the original paper * <a href="http://portal.acm.org/citation.cfm?id=1342263"> * Parallel white noise generation on a GPU via cryptographic hash</a>. * The optimizations mentioned in the paper are also present in the CUDPP * version of the MD5 Random number generator. * * It is also worth pointing out that the GPU version will \b not generate * the same output * as the CPU version. This is due to the difference in the * floating point accuracy and several optimizations that have been used * (i.e. 
calculating sin using device hardware rather than storing it in * an array that the original implementation does). However, the distribution * of the numbers is well suited for random number generation, even without * the CPU-GPU invariance. * * @param[out] d_out the array of unsigned integers allocated on device memory * @param[in] seed the random seed used to vary the output * @param[in] numElements the number of elements in \a d_out * @see gen_randMD5() * @see cudppRand() * @todo: chose a better block size, perhaps a multiple of two is optimal */ void launchRandMD5Kernel(unsigned int * d_out, unsigned int seed, size_t numElements) { //first, we need a temporary array of uints uint4 * dev_output; //figure out how many elements are needed in this array unsigned int devOutputsize = numElements / 4; devOutputsize += (numElements %4 == 0) ? 0 : 1; //used for overflow unsigned int memSize = devOutputsize * sizeof(uint4); //now figure out block size unsigned int blockSize = RAND_CTA_SIZE; if(devOutputsize < RAND_CTA_SIZE) blockSize = devOutputsize; unsigned int n_blocks = devOutputsize/blockSize + (devOutputsize%blockSize == 0 ? 0:1); //printf("Generating %u random numbers using %u blocks and %u threads per block\n", numElements, n_blocks, blockSize); /* old debug code now removed. 
printf("\nnumber of elements: %u, devOutputSize: %u\n", numElements, devOutputsize); printf("number of blocks: %u blocksize: %u devOutputsize = %u\n", n_blocks, blockSize, devOutputsize); printf("number of threads: %u\n", n_blocks * blockSize); printf("seed value: %u\n", seed); */ //now create the memory on the device CUDA_SAFE_CALL( hipMalloc((void **) &dev_output, memSize)); CUDA_SAFE_CALL( hipMemset(dev_output, 0, memSize)); hipLaunchKernelGGL(( gen_randMD5), dim3(n_blocks), dim3(blockSize), 0, 0, dev_output, devOutputsize, seed); //here the GPU computation is done //here we have all the data on the device, we copy it over into host memory //calculate final memSize //@TODO: write a template version of this which calls two different version // depending if numElements %4 == 0 size_t finalMemSize = sizeof(unsigned int) * numElements; CUDA_SAFE_CALL( hipMemcpy(d_out, dev_output, finalMemSize, hipMemcpyDeviceToDevice)); CUDA_SAFE_CALL( hipFree(dev_output)); }//end launchRandMD5Kernel #ifdef __cplusplus extern "C" { #endif /**@brief Dispatches the rand function based on the plan * * This is the dispatch call which looks at the algorithm specified in \a plan * and calls the appropriate random number generation algorithm. * * @param[out] d_out the array allocated on device memory where the random * numbers will be stored * must be of type unsigned int * @param[in] numElements the number of elements in the array d_out * @param[in] plan pointer to CUDPPRandPlan which contains the algorithm to run */ void cudppRandDispatch(void * d_out, size_t numElements, const CUDPPRandPlan * plan) { /*//switch to figure out which algorithm to run switch(plan->m_config.algorithm) { case CUDPP_RAND_MD5: //run the md5 algorithm here launchRandMD5Kernel( (unsigned int *) d_out, plan->m_seed, numElements); break; default: break; }//end switch*/ } #ifdef __cplusplus } #endif /** @} */ // end rand_app /** @} */ // end cudpp_app
41bb50e7c01cacddc0792a51cd8c8a37d8fe43a7.cu
// ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 4400 $ // $Date: 2008-08-04 10:58:14 -0700 (Mon, 04 Aug 2008) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * rand_md5_app.cu * * @brief CUDPP application-level rand routine for MD5 */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> #include "cta/rand_cta.cu" #include "kernel/rand_kernel.cu" #define RAND_CTA_SIZE 128 //128 chosen, may be changed later /** \addtogroup cudpp_app * */ /** @name Rand Functions * @{ */ /**@brief Launches the MD5 Random number generator kernel * * The MD5 Random number generator works by generating 128 bit digests which * are then broken down into 32 bit chunks and stored inside \a d_out. * \a d_out is expected to be of type unsigned int and can hold \a numElements * elements. * * An analysis of the stastical distribution of the MD5 random number generator * can be found in the original paper * <a href="http://portal.acm.org/citation.cfm?id=1342263"> * Parallel white noise generation on a GPU via cryptographic hash</a>. * The optimizations mentioned in the paper are also present in the CUDPP * version of the MD5 Random number generator. * * It is also worth pointing out that the GPU version will \b not generate * the same output * as the CPU version. This is due to the difference in the * floating point accuracy and several optimizations that have been used * (i.e. calculating sin using device hardware rather than storing it in * an array that the original implementation does). 
However, the distribution * of the numbers is well suited for random number generation, even without * the CPU-GPU invariance. * * @param[out] d_out the array of unsigned integers allocated on device memory * @param[in] seed the random seed used to vary the output * @param[in] numElements the number of elements in \a d_out * @see gen_randMD5() * @see cudppRand() * @todo: chose a better block size, perhaps a multiple of two is optimal */ void launchRandMD5Kernel(unsigned int * d_out, unsigned int seed, size_t numElements) { //first, we need a temporary array of uints uint4 * dev_output; //figure out how many elements are needed in this array unsigned int devOutputsize = numElements / 4; devOutputsize += (numElements %4 == 0) ? 0 : 1; //used for overflow unsigned int memSize = devOutputsize * sizeof(uint4); //now figure out block size unsigned int blockSize = RAND_CTA_SIZE; if(devOutputsize < RAND_CTA_SIZE) blockSize = devOutputsize; unsigned int n_blocks = devOutputsize/blockSize + (devOutputsize%blockSize == 0 ? 0:1); //printf("Generating %u random numbers using %u blocks and %u threads per block\n", numElements, n_blocks, blockSize); /* old debug code now removed. 
printf("\nnumber of elements: %u, devOutputSize: %u\n", numElements, devOutputsize); printf("number of blocks: %u blocksize: %u devOutputsize = %u\n", n_blocks, blockSize, devOutputsize); printf("number of threads: %u\n", n_blocks * blockSize); printf("seed value: %u\n", seed); */ //now create the memory on the device CUDA_SAFE_CALL( cudaMalloc((void **) &dev_output, memSize)); CUDA_SAFE_CALL( cudaMemset(dev_output, 0, memSize)); gen_randMD5<<<n_blocks, blockSize>>>(dev_output, devOutputsize, seed); //here the GPU computation is done //here we have all the data on the device, we copy it over into host memory //calculate final memSize //@TODO: write a template version of this which calls two different version // depending if numElements %4 == 0 size_t finalMemSize = sizeof(unsigned int) * numElements; CUDA_SAFE_CALL( cudaMemcpy(d_out, dev_output, finalMemSize, cudaMemcpyDeviceToDevice)); CUDA_SAFE_CALL( cudaFree(dev_output)); }//end launchRandMD5Kernel #ifdef __cplusplus extern "C" { #endif /**@brief Dispatches the rand function based on the plan * * This is the dispatch call which looks at the algorithm specified in \a plan * and calls the appropriate random number generation algorithm. * * @param[out] d_out the array allocated on device memory where the random * numbers will be stored * must be of type unsigned int * @param[in] numElements the number of elements in the array d_out * @param[in] plan pointer to CUDPPRandPlan which contains the algorithm to run */ void cudppRandDispatch(void * d_out, size_t numElements, const CUDPPRandPlan * plan) { /*//switch to figure out which algorithm to run switch(plan->m_config.algorithm) { case CUDPP_RAND_MD5: //run the md5 algorithm here launchRandMD5Kernel( (unsigned int *) d_out, plan->m_seed, numElements); break; default: break; }//end switch*/ } #ifdef __cplusplus } #endif /** @} */ // end rand_app /** @} */ // end cudpp_app
a0ab4a80969c9ddeeec9e517bd1958829b87e517.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void readLocalMemory(const float *data, float *output, int size, int repeat) { int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0; float sum = 0; int tid=threadIdx.x, localSize=blockDim.x, grpid=blockIdx.x, litems=2048/localSize, goffset=localSize*grpid+tid*litems; int s = tid; __shared__ float lbuf[2048]; for ( ; j<litems && j<(size-goffset) ; ++j) lbuf[tid*litems+j] = data[goffset+j]; for (int i=0 ; j<litems ; ++j,++i) lbuf[tid*litems+j] = data[i]; __syncthreads(); for (j=0 ; j<repeat ; ++j) { float a0 = lbuf[(s+0)&(2047)]; float a1 = lbuf[(s+1)&(2047)]; float a2 = lbuf[(s+2)&(2047)]; float a3 = lbuf[(s+3)&(2047)]; float a4 = lbuf[(s+4)&(2047)]; float a5 = lbuf[(s+5)&(2047)]; float a6 = lbuf[(s+6)&(2047)]; float a7 = lbuf[(s+7)&(2047)]; float a8 = lbuf[(s+8)&(2047)]; float a9 = lbuf[(s+9)&(2047)]; float a10 = lbuf[(s+10)&(2047)]; float a11 = lbuf[(s+11)&(2047)]; float a12 = lbuf[(s+12)&(2047)]; float a13 = lbuf[(s+13)&(2047)]; float a14 = lbuf[(s+14)&(2047)]; float a15 = lbuf[(s+15)&(2047)]; sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15; s = (s+16)&(2047); } output[gid] = sum; }
a0ab4a80969c9ddeeec9e517bd1958829b87e517.cu
#include "includes.h" __global__ void readLocalMemory(const float *data, float *output, int size, int repeat) { int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0; float sum = 0; int tid=threadIdx.x, localSize=blockDim.x, grpid=blockIdx.x, litems=2048/localSize, goffset=localSize*grpid+tid*litems; int s = tid; __shared__ float lbuf[2048]; for ( ; j<litems && j<(size-goffset) ; ++j) lbuf[tid*litems+j] = data[goffset+j]; for (int i=0 ; j<litems ; ++j,++i) lbuf[tid*litems+j] = data[i]; __syncthreads(); for (j=0 ; j<repeat ; ++j) { float a0 = lbuf[(s+0)&(2047)]; float a1 = lbuf[(s+1)&(2047)]; float a2 = lbuf[(s+2)&(2047)]; float a3 = lbuf[(s+3)&(2047)]; float a4 = lbuf[(s+4)&(2047)]; float a5 = lbuf[(s+5)&(2047)]; float a6 = lbuf[(s+6)&(2047)]; float a7 = lbuf[(s+7)&(2047)]; float a8 = lbuf[(s+8)&(2047)]; float a9 = lbuf[(s+9)&(2047)]; float a10 = lbuf[(s+10)&(2047)]; float a11 = lbuf[(s+11)&(2047)]; float a12 = lbuf[(s+12)&(2047)]; float a13 = lbuf[(s+13)&(2047)]; float a14 = lbuf[(s+14)&(2047)]; float a15 = lbuf[(s+15)&(2047)]; sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15; s = (s+16)&(2047); } output[gid] = sum; }
305e59056b873a30c44e2109f647ae75419a933a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include <stdio.h> #include <time.h> #include <assert.h> #include "network.h" #include "image.h" #include "data.h" #include "utils.h" #include "parser.h" #include "crop_layer.h" #include "connected_layer.h" #include "rnn_layer.h" #include "gru_layer.h" #include "crnn_layer.h" #include "detection_layer.h" #include "region_layer.h" #include "convolutional_layer.h" #include "activation_layer.h" #include "maxpool_layer.h" #include "reorg_layer.h" #include "avgpool_layer.h" #include "normalization_layer.h" #include "batchnorm_layer.h" #include "cost_layer.h" #include "local_layer.h" #include "softmax_layer.h" #include "dropout_layer.h" #include "route_layer.h" #include "shortcut_layer.h" #include "blas.h" } #ifdef OPENCV #include "opencv2/highgui/highgui_c.h" #endif #include "http_stream.h" float * get_network_output_gpu_layer(network net, int i); float * get_network_delta_gpu_layer(network net, int i); float * get_network_output_gpu(network net); void forward_network_gpu(network net, network_state state) { //hipDeviceSynchronize(); //printf("\n"); state.workspace = net.workspace; int i; for(i = 0; i < net.n; ++i){ state.index = i; layer l = net.layers[i]; if(l.delta_gpu && state.train){ fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1); } //printf("\n layer %d - type: %d - \n", i, l.type); //start_timer(); l.forward_gpu(l, state); //hipDeviceSynchronize(); //stop_timer_and_show(); if(net.wait_stream) hipStreamSynchronize(get_cuda_stream()); state.input = l.output_gpu; //hipDeviceSynchronize(); /* cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) { int j; for (j = 0; j < l.out_c; ++j) { image img = make_image(l.out_w, l.out_h, 3); memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); memcpy(img.data + l.out_w*l.out_h * 1, l.output + 
l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); char buff[256]; sprintf(buff, "layer-%d slice-%d", i, j); show_image(img, buff); save_image(img, buff); } cvWaitKey(0); // wait press-key in console cvDestroyAllWindows(); } */ } //hipStreamSynchronize(get_cuda_stream()); // sync CUDA-functions //hipDeviceSynchronize(); //show_total_time(); } void backward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; float * original_input = state.input; float * original_delta = state.delta; for(i = net.n-1; i >= 0; --i){ state.index = i; layer l = net.layers[i]; if (l.stopbackward) break; if(i == 0){ state.input = original_input; state.delta = original_delta; }else{ layer prev = net.layers[i-1]; state.input = prev.output_gpu; state.delta = prev.delta_gpu; } l.backward_gpu(l, state); } } void update_network_gpu(network net) { cuda_set_device(net.gpu_index); int i; int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); for(i = 0; i < net.n; ++i){ layer l = net.layers[i]; l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } } void forward_backward_network_gpu(network net, float *x, float *y) { network_state state; state.index = 0; state.net = net; int x_size = get_network_input_size(net)*net.batch; int y_size = get_network_output_size(net)*net.batch; if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch; if(!*net.input_gpu){ *net.input_gpu = cuda_make_array(x, x_size); *net.truth_gpu = cuda_make_array(y, y_size); }else{ cuda_push_array(*net.input_gpu, x, x_size); cuda_push_array(*net.truth_gpu, y, y_size); } state.input = *net.input_gpu; state.delta = 0; state.truth = *net.truth_gpu; state.train = 1; #ifdef CUDNN_HALF int i; for (i = 0; i < net.n; ++i) { layer l = net.layers[i]; cuda_convert_f32_to_f16(l.weights_gpu, 
l.c*l.n*l.size*l.size, l.weights_gpu16); } #endif forward_network_gpu(net, state); //hipStreamSynchronize(get_cuda_stream()); backward_network_gpu(net, state); } float train_network_datum_gpu(network net, float *x, float *y) { *net.seen += net.batch; forward_backward_network_gpu(net, x, y); float error = get_network_cost(net); if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net); return error; } typedef struct { network net; data d; float *err; } train_args; void *train_thread(void *ptr) { train_args args = *(train_args*)ptr; free(ptr); cuda_set_device(args.net.gpu_index); *args.err = train_network(args.net, args.d); return 0; } pthread_t train_network_in_thread(network net, data d, float *err) { pthread_t thread; train_args *ptr = (train_args *)calloc(1, sizeof(train_args)); ptr->net = net; ptr->d = d; ptr->err = err; if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed"); return thread; } void pull_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void push_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void update_layer(layer l, network net) { int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); l.t = 
get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } void merge_weights(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1); if (l.scales) { axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1); } } void scale_weights(layer l, float s) { if (l.type == CONVOLUTIONAL) { scal_cpu(l.n, s, l.biases, 1); scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1); if (l.scales) { scal_cpu(l.n, s, l.scales, 1); } } else if(l.type == CONNECTED) { scal_cpu(l.outputs, s, l.biases, 1); scal_cpu(l.outputs*l.inputs, s, l.weights, 1); } } void pull_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.biases_gpu, l.biases, l.outputs); cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void push_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, l.biases, l.outputs); cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void distribute_weights(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, base.biases, l.n); cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c); if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, base.biases, l.outputs); cuda_push_array(l.weights_gpu, base.weights, 
l.outputs*l.inputs); } } void merge_updates(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1); if (l.scale_updates) { axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1); } } void distribute_updates(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c); if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs); } } void sync_layer(network *nets, int n, int j) { //printf("Syncing layer %d\n", j); int i; network net = nets[0]; layer base = net.layers[j]; cuda_set_device(net.gpu_index); pull_weights(base); for (i = 1; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; pull_weights(l); merge_weights(l, base); } scale_weights(base, 1./n); for (i = 0; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; distribute_weights(l, base); } //printf("Done syncing layer %d\n", j); } typedef struct{ network *nets; int n; int j; } sync_args; void *sync_layer_thread(void *ptr) { sync_args args = *(sync_args*)ptr; sync_layer(args.nets, args.n, args.j); free(ptr); return 0; } pthread_t sync_layer_in_thread(network *nets, int n, int j) { pthread_t thread; sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args)); ptr->nets = nets; ptr->n = n; ptr->j = j; if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed"); return thread; } void 
sync_nets(network *nets, int n, int interval) { int j; int layers = nets[0].n; pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t)); *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions; for (j = 0; j < n; ++j){ *nets[j].seen = *nets[0].seen; } for (j = 0; j < layers; ++j) { threads[j] = sync_layer_in_thread(nets, n, j); } for (j = 0; j < layers; ++j) { pthread_join(threads[j], 0); } free(threads); } float train_networks(network *nets, int n, data d, int interval) { int i; int batch = nets[0].batch; int subdivisions = nets[0].subdivisions; assert(batch * subdivisions * n == d.X.rows); pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t)); float *errors = (float *) calloc(n, sizeof(float)); float sum = 0; for(i = 0; i < n; ++i){ data p = get_data_part(d, i, n); threads[i] = train_network_in_thread(nets[i], p, errors + i); } for(i = 0; i < n; ++i){ pthread_join(threads[i], 0); //printf("%f\n", errors[i]); sum += errors[i]; } //hipDeviceSynchronize(); if (get_current_batch(nets[0]) % interval == 0) { printf("Syncing... 
"); fflush(stdout); sync_nets(nets, n, interval); printf("Done!\n"); } //hipDeviceSynchronize(); free(threads); free(errors); return (float)sum/(n); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } float *network_predict_gpu(network net, float *input) { if (net.gpu_index != cuda_get_device()) cuda_set_device(net.gpu_index); int size = get_network_input_size(net) * net.batch; network_state state; state.index = 0; state.net = net; //state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom() state.input = net.input_state_gpu; memcpy(net.input_pinned_cpu, input, size * sizeof(float)); cuda_push_array(state.input, net.input_pinned_cpu, size); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu(net, state); float *out = get_network_output_gpu(net); //cuda_free(state.input); // will be freed in the free_network() return out; }
305e59056b873a30c44e2109f647ae75419a933a.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include <stdio.h> #include <time.h> #include <assert.h> #include "network.h" #include "image.h" #include "data.h" #include "utils.h" #include "parser.h" #include "crop_layer.h" #include "connected_layer.h" #include "rnn_layer.h" #include "gru_layer.h" #include "crnn_layer.h" #include "detection_layer.h" #include "region_layer.h" #include "convolutional_layer.h" #include "activation_layer.h" #include "maxpool_layer.h" #include "reorg_layer.h" #include "avgpool_layer.h" #include "normalization_layer.h" #include "batchnorm_layer.h" #include "cost_layer.h" #include "local_layer.h" #include "softmax_layer.h" #include "dropout_layer.h" #include "route_layer.h" #include "shortcut_layer.h" #include "blas.h" } #ifdef OPENCV #include "opencv2/highgui/highgui_c.h" #endif #include "http_stream.h" float * get_network_output_gpu_layer(network net, int i); float * get_network_delta_gpu_layer(network net, int i); float * get_network_output_gpu(network net); void forward_network_gpu(network net, network_state state) { //cudaDeviceSynchronize(); //printf("\n"); state.workspace = net.workspace; int i; for(i = 0; i < net.n; ++i){ state.index = i; layer l = net.layers[i]; if(l.delta_gpu && state.train){ fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1); } //printf("\n layer %d - type: %d - \n", i, l.type); //start_timer(); l.forward_gpu(l, state); //cudaDeviceSynchronize(); //stop_timer_and_show(); if(net.wait_stream) cudaStreamSynchronize(get_cuda_stream()); state.input = l.output_gpu; //cudaDeviceSynchronize(); /* cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) { int j; for (j = 0; j < l.out_c; ++j) { image img = make_image(l.out_w, l.out_h, 3); memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); memcpy(img.data + l.out_w*l.out_h * 1, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); 
memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); char buff[256]; sprintf(buff, "layer-%d slice-%d", i, j); show_image(img, buff); save_image(img, buff); } cvWaitKey(0); // wait press-key in console cvDestroyAllWindows(); } */ } //cudaStreamSynchronize(get_cuda_stream()); // sync CUDA-functions //cudaDeviceSynchronize(); //show_total_time(); } void backward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; float * original_input = state.input; float * original_delta = state.delta; for(i = net.n-1; i >= 0; --i){ state.index = i; layer l = net.layers[i]; if (l.stopbackward) break; if(i == 0){ state.input = original_input; state.delta = original_delta; }else{ layer prev = net.layers[i-1]; state.input = prev.output_gpu; state.delta = prev.delta_gpu; } l.backward_gpu(l, state); } } void update_network_gpu(network net) { cuda_set_device(net.gpu_index); int i; int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); for(i = 0; i < net.n; ++i){ layer l = net.layers[i]; l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } } void forward_backward_network_gpu(network net, float *x, float *y) { network_state state; state.index = 0; state.net = net; int x_size = get_network_input_size(net)*net.batch; int y_size = get_network_output_size(net)*net.batch; if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch; if(!*net.input_gpu){ *net.input_gpu = cuda_make_array(x, x_size); *net.truth_gpu = cuda_make_array(y, y_size); }else{ cuda_push_array(*net.input_gpu, x, x_size); cuda_push_array(*net.truth_gpu, y, y_size); } state.input = *net.input_gpu; state.delta = 0; state.truth = *net.truth_gpu; state.train = 1; #ifdef CUDNN_HALF int i; for (i = 0; i < net.n; ++i) { layer l = net.layers[i]; cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16); } #endif 
forward_network_gpu(net, state); //cudaStreamSynchronize(get_cuda_stream()); backward_network_gpu(net, state); } float train_network_datum_gpu(network net, float *x, float *y) { *net.seen += net.batch; forward_backward_network_gpu(net, x, y); float error = get_network_cost(net); if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net); return error; } typedef struct { network net; data d; float *err; } train_args; void *train_thread(void *ptr) { train_args args = *(train_args*)ptr; free(ptr); cuda_set_device(args.net.gpu_index); *args.err = train_network(args.net, args.d); return 0; } pthread_t train_network_in_thread(network net, data d, float *err) { pthread_t thread; train_args *ptr = (train_args *)calloc(1, sizeof(train_args)); ptr->net = net; ptr->d = d; ptr->err = err; if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed"); return thread; } void pull_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void push_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void update_layer(layer l, network net) { int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); l.t = get_current_batch(net); if(l.update_gpu){ 
l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } void merge_weights(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1); if (l.scales) { axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1); } } void scale_weights(layer l, float s) { if (l.type == CONVOLUTIONAL) { scal_cpu(l.n, s, l.biases, 1); scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1); if (l.scales) { scal_cpu(l.n, s, l.scales, 1); } } else if(l.type == CONNECTED) { scal_cpu(l.outputs, s, l.biases, 1); scal_cpu(l.outputs*l.inputs, s, l.weights, 1); } } void pull_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.biases_gpu, l.biases, l.outputs); cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void push_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, l.biases, l.outputs); cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void distribute_weights(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, base.biases, l.n); cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c); if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, base.biases, l.outputs); cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs); } } void merge_updates(layer l, 
layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1); if (l.scale_updates) { axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1); } } void distribute_updates(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c); if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs); } } void sync_layer(network *nets, int n, int j) { //printf("Syncing layer %d\n", j); int i; network net = nets[0]; layer base = net.layers[j]; cuda_set_device(net.gpu_index); pull_weights(base); for (i = 1; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; pull_weights(l); merge_weights(l, base); } scale_weights(base, 1./n); for (i = 0; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; distribute_weights(l, base); } //printf("Done syncing layer %d\n", j); } typedef struct{ network *nets; int n; int j; } sync_args; void *sync_layer_thread(void *ptr) { sync_args args = *(sync_args*)ptr; sync_layer(args.nets, args.n, args.j); free(ptr); return 0; } pthread_t sync_layer_in_thread(network *nets, int n, int j) { pthread_t thread; sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args)); ptr->nets = nets; ptr->n = n; ptr->j = j; if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed"); return thread; } void sync_nets(network *nets, int n, int interval) { int j; int 
layers = nets[0].n; pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t)); *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions; for (j = 0; j < n; ++j){ *nets[j].seen = *nets[0].seen; } for (j = 0; j < layers; ++j) { threads[j] = sync_layer_in_thread(nets, n, j); } for (j = 0; j < layers; ++j) { pthread_join(threads[j], 0); } free(threads); } float train_networks(network *nets, int n, data d, int interval) { int i; int batch = nets[0].batch; int subdivisions = nets[0].subdivisions; assert(batch * subdivisions * n == d.X.rows); pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t)); float *errors = (float *) calloc(n, sizeof(float)); float sum = 0; for(i = 0; i < n; ++i){ data p = get_data_part(d, i, n); threads[i] = train_network_in_thread(nets[i], p, errors + i); } for(i = 0; i < n; ++i){ pthread_join(threads[i], 0); //printf("%f\n", errors[i]); sum += errors[i]; } //cudaDeviceSynchronize(); if (get_current_batch(nets[0]) % interval == 0) { printf("Syncing... 
"); fflush(stdout); sync_nets(nets, n, interval); printf("Done!\n"); } //cudaDeviceSynchronize(); free(threads); free(errors); return (float)sum/(n); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } float *network_predict_gpu(network net, float *input) { if (net.gpu_index != cuda_get_device()) cuda_set_device(net.gpu_index); int size = get_network_input_size(net) * net.batch; network_state state; state.index = 0; state.net = net; //state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom() state.input = net.input_state_gpu; memcpy(net.input_pinned_cpu, input, size * sizeof(float)); cuda_push_array(state.input, net.input_pinned_cpu, size); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu(net, state); float *out = get_network_output_gpu(net); //cuda_free(state.input); // will be freed in the free_network() return out; }
1233d39df9983267ddd9745c4edf2a43fbda8600.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void unpack_left( const int x, const int y, const int halo_depth, double* field, double* buffer, const int depth) { const int y_inner = y - 2*halo_depth; const int gid = threadIdx.x+blockDim.x*blockIdx.x; if(gid >= y_inner*depth) return; const int lines = gid / depth; const int offset = halo_depth - depth + lines*(x - depth); field[offset+gid] = buffer[gid]; }
1233d39df9983267ddd9745c4edf2a43fbda8600.cu
#include "includes.h" __global__ void unpack_left( const int x, const int y, const int halo_depth, double* field, double* buffer, const int depth) { const int y_inner = y - 2*halo_depth; const int gid = threadIdx.x+blockDim.x*blockIdx.x; if(gid >= y_inner*depth) return; const int lines = gid / depth; const int offset = halo_depth - depth + lines*(x - depth); field[offset+gid] = buffer[gid]; }
7398cc7df27acf52c226d4ff53fbfd00d445bdca.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Matrix_Product.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; hipMalloc(&A, XSIZE*YSIZE); double *g = NULL; hipMalloc(&g, XSIZE*YSIZE); double *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Matrix_Product), dim3(gridBlock),dim3(threadBlock), 0, 0, A,g,C); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Matrix_Product), dim3(gridBlock),dim3(threadBlock), 0, 0, A,g,C); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Matrix_Product), dim3(gridBlock),dim3(threadBlock), 0, 0, A,g,C); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7398cc7df27acf52c226d4ff53fbfd00d445bdca.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Matrix_Product.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); double *g = NULL; cudaMalloc(&g, XSIZE*YSIZE); double *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Matrix_Product<<<gridBlock,threadBlock>>>(A,g,C); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Matrix_Product<<<gridBlock,threadBlock>>>(A,g,C); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Matrix_Product<<<gridBlock,threadBlock>>>(A,g,C); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60703cb4fda67605602b68618f11cf51aa03815d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * * Code and text by Sean Baxter, NVIDIA Research * See http://nvlabs.github.io/moderngpu for repository and documentation. 
* ******************************************************************************/ #include "util/mgpucontext.h" namespace mgpu { //////////////////////////////////////////////////////////////////////////////// // CudaTimer void CudaTimer::Start() { hipEventRecord(start); hipDeviceSynchronize(); } double CudaTimer::Split() { hipEventRecord(end); hipDeviceSynchronize(); float t; hipEventElapsedTime(&t, start, end); start.Swap(end); return (t / 1000.0); } double CudaTimer::Throughput(int count, int numIterations) { double elapsed = Split(); return (double)numIterations * count / elapsed; } //////////////////////////////////////////////////////////////////////////////// // CudaDevice __global__ void KernelVersionShim() { } struct DeviceGroup { int numCudaDevices; CudaDevice** cudaDevices; DeviceGroup() { numCudaDevices = -1; cudaDevices = 0; } int GetDeviceCount() { if(-1 == numCudaDevices) { hipError_t error = hipGetDeviceCount(&numCudaDevices); if(hipSuccess != error || numCudaDevices <= 0) { fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n"); exit(0); } cudaDevices = new CudaDevice*[numCudaDevices]; memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices); } return numCudaDevices; } CudaDevice* GetByOrdinal(int ordinal) { if(ordinal >= GetDeviceCount()) return 0; if(!cudaDevices[ordinal]) { // Retrieve the device properties. CudaDevice* device = cudaDevices[ordinal] = new CudaDevice; device->_ordinal = ordinal; hipError_t error = hipGetDeviceProperties(&device->_prop, ordinal); if(hipSuccess != error) { fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal); exit(0); } // Get the compiler version for this device. 
//hipSetDevice(ordinal); // don't create new context hipFuncAttributes attr; error = hipFuncGetAttributes(&attr, KernelVersionShim); if(hipSuccess == error) device->_ptxVersion = 10 * attr.ptxVersion; else { printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE" " %d\n", ordinal); // The module wasn't compiled with support for this device. device->_ptxVersion = 0; } } return cudaDevices[ordinal]; } ~DeviceGroup() { if(cudaDevices) { for(int i = 0; i < numCudaDevices; ++i) delete cudaDevices[i]; delete [] cudaDevices; } hipDeviceReset(); } }; std::auto_ptr<DeviceGroup> deviceGroup; int CudaDevice::DeviceCount() { if(!deviceGroup.get()) deviceGroup.reset(new DeviceGroup); return deviceGroup->GetDeviceCount(); } CudaDevice& CudaDevice::ByOrdinal(int ordinal) { if(ordinal < 0 || ordinal >= DeviceCount()) { fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal); exit(0); } return *deviceGroup->GetByOrdinal(ordinal); } CudaDevice& CudaDevice::Selected() { int ordinal; hipError_t error = hipGetDevice(&ordinal); if(hipSuccess != error) { fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n"); exit(0); } return ByOrdinal(ordinal); } void CudaDevice::SetActive() { hipError_t error = hipSetDevice(_ordinal); if(hipSuccess != error) { fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal); exit(0); } } std::string CudaDevice::DeviceString() const { size_t freeMem, totalMem; hipError_t error = hipMemGetInfo(&freeMem, &totalMem); if(hipSuccess != error) { fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n", _ordinal); exit(0); } double memBandwidth = (_prop.memoryClockRate * 1000.0) * (_prop.memoryBusWidth / 8 * 2) / 1.0e9; std::string s = stringprintf( "%s : %8.3lf Mhz (Ordinal %d)\n" "%d SMs enabled. 
Compute Capability sm_%d%d\n" "FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n" "Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n" "ECC %s\n\n", _prop.name, _prop.clockRate / 1000.0, _ordinal, _prop.multiProcessorCount, _prop.major, _prop.minor, (int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*), _prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth, _prop.ECCEnabled ? "Enabled" : "Disabled"); return s; } //////////////////////////////////////////////////////////////////////////////// // CudaContext struct ContextGroup { CudaContext** standardContexts; int numDevices; ContextGroup() { numDevices = CudaDevice::DeviceCount(); standardContexts = new CudaContext*[numDevices]; memset(standardContexts, 0, sizeof(CudaContext*) * numDevices); } CudaContext* GetByOrdinal(int ordinal) { if(!standardContexts[ordinal]) { CudaDevice& device = CudaDevice::ByOrdinal(ordinal); standardContexts[ordinal] = new CudaContext(device, false, true); } return standardContexts[ordinal]; } ~ContextGroup() { if(standardContexts) { for(int i = 0; i < numDevices; ++i) delete standardContexts[i]; delete [] standardContexts; } } }; std::auto_ptr<ContextGroup> contextGroup; CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) : _event(hipEventDisableTiming /*| hipEventBlockingSync */), _stream(0), _noRefCount(standard), _pageLocked(0) { // Create an allocator. if(standard) _alloc.reset(new CudaAllocSimple(device)); else _alloc = CreateDefaultAlloc(device); if(newStream) hipStreamCreate(&_stream); _ownStream = newStream; // Allocate 4KB of page-locked memory. hipError_t error; // error = hipHostMalloc((void**)&_pageLocked, 4096); // Allocate an auxiliary stream. 
error = hipStreamCreate(&_auxStream); } CudaContext::~CudaContext() { if(_pageLocked) hipHostFree(_pageLocked); if(_ownStream && _stream) hipStreamDestroy(_stream); if(_auxStream) hipStreamDestroy(_auxStream); } AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) { intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device)); size_t freeMem, totalMem; hipError_t error = hipMemGetInfo(&freeMem, &totalMem); if(hipSuccess != error) { fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n", device.Ordinal()); exit(0); } // Maintain a buffer of 128MB with max objects of 64MB. alloc->SetCapacity(128<< 20, 64<< 20); return AllocPtr(alloc.get()); } CudaContext& CudaContext::StandardContext(int ordinal) { bool setActive = -1 != ordinal; if(-1 == ordinal) { hipError_t error = hipGetDevice(&ordinal); if(hipSuccess != error) { fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n"); exit(0); } } int numDevices = CudaDevice::DeviceCount(); if(ordinal < 0 || ordinal >= numDevices) { fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal); exit(0); } if(!contextGroup.get()) contextGroup.reset(new ContextGroup); CudaContext& context = //*contextGroup->standardContexts[ordinal]; *contextGroup->GetByOrdinal(ordinal); if(!context.PTXVersion()) { fprintf(stderr, "This CUDA executable was not compiled with support" " for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10); exit(0); } if(setActive) context.SetActive(); return context; } ContextPtr CreateCudaDevice(int ordinal) { CudaDevice& device = CudaDevice::ByOrdinal(ordinal); ContextPtr context(new CudaContext(device, false, false)); return context; } ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) { int ordinal = 0; if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) { fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n"); exit(0); } ContextPtr context = CreateCudaDevice(ordinal); if(!context->PTXVersion()) { fprintf(stderr, "This CUDA executable was 
not compiled with support" " for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10); exit(0); } context->SetActive(); if(printInfo) printf("%s\n", context->Device().DeviceString().c_str()); return context; } ContextPtr CreateCudaDeviceStream(int ordinal) { ContextPtr context(new CudaContext( CudaDevice::ByOrdinal(ordinal), true, false)); return context; } ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) { int ordinal = 0; if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) { fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n"); exit(0); } ContextPtr context = CreateCudaDeviceStream(ordinal); if(!context->PTXVersion()) { fprintf(stderr, "This CUDA executable was not compiled with support" " for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10); exit(0); } context->SetActive(); if(printInfo) printf("%s\n", context->Device().DeviceString().c_str()); return context; } ContextPtr CreateCudaDeviceAttachStream(int ordinal, hipStream_t stream) { ContextPtr context(new CudaContext( CudaDevice::ByOrdinal(ordinal), false, false)); context->_stream = stream; return context; } ContextPtr CreateCudaDeviceAttachStream(hipStream_t stream) { int ordinal; hipGetDevice(&ordinal); return CreateCudaDeviceAttachStream(ordinal, stream); } //////////////////////////////////////////////////////////////////////////////// // CudaAllocSimple hipError_t CudaAllocSimple::Malloc(size_t size, void** p) { hipError_t error = hipSuccess; *p = 0; if(size) error = hipMalloc(p, size); if(hipSuccess != error) { printf("CUDA MALLOC ERROR %d\n", error); exit(0); } return error; } bool CudaAllocSimple::Free(void* p) { hipError_t error = hipSuccess; if(p) error = hipFree(p); return hipSuccess == error; } //////////////////////////////////////////////////////////////////////////////// // CudaAllocBuckets CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) { _maxObjectSize = _capacity = _allocated = _committed = 0; _counter = 
0; } CudaAllocBuckets::~CudaAllocBuckets() { SetCapacity(0, 0); assert(!_allocated); } bool CudaAllocBuckets::SanityCheck() const { // Iterate through all allocated objects and verify sizes. size_t allocatedCount = 0, committedCount = 0; for(AddressMap::const_iterator i = _addressMap.begin(); i != _addressMap.end(); ++i) { int bucket = i->second->bucket; size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0; allocatedCount += size; if(i->second->priority == _priorityMap.end()) committedCount += size; } return allocatedCount == _allocated && committedCount == _committed; } hipError_t CudaAllocBuckets::Malloc(size_t size, void** p) { // Locate the bucket index and adjust the size of the allocation to the // bucket size. size_t allocSize = size; size_t commitSize = 0; int bucket = LocateBucket(size); if(bucket < NumBuckets) allocSize = commitSize = BucketSizes[bucket]; // Peel off an already-allocated node and reuse it. MemList& list = _memLists[bucket]; if(list.size() && list.front().priority != _priorityMap.end()) { MemList::iterator memIt = list.begin(); _priorityMap.erase(memIt->priority); memIt->priority = _priorityMap.end(); list.splice(list.end(), list, memIt); _committed += commitSize; *p = memIt->address->first; return hipSuccess; } // Shrink if this allocation would put us over the limit. 
Compact(commitSize); hipError_t error = hipSuccess; *p = 0; if(size) error = hipMalloc(p, allocSize); while((hipErrorMemoryAllocation == error) && (_committed < _allocated)) { SetCapacity(_capacity - _capacity / 10, _maxObjectSize); error = hipMalloc(p, size); } if(hipSuccess != error) return error; MemList::iterator memIt = _memLists[bucket].insert(_memLists[bucket].end(), MemNode()); memIt->bucket = bucket; memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first; memIt->priority = _priorityMap.end(); _allocated += commitSize; _committed += commitSize; assert(SanityCheck()); return hipSuccess; } bool CudaAllocBuckets::Free(void* p) { AddressMap::iterator it = _addressMap.find(p); if(it == _addressMap.end()) { // If the pointer was not found in the address map, hipFree it anyways // but return false. if(p) hipFree(p); return false; } // Because we're freeing a page, it had better not be in the priority queue. MemList::iterator memIt = it->second; assert(memIt->priority == _priorityMap.end()); // Always free allocations larger than the largest bucket it->second->priority = _priorityMap.insert( std::make_pair(_counter++ - memIt->bucket, memIt)); // Freed nodes are moved to the front, committed nodes are moved to the // end. int bucket = memIt->bucket; size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0; MemList& list = _memLists[bucket]; list.splice(list.begin(), list, memIt); _committed -= commitSize; // Delete data that's not cached. if(NumBuckets == bucket) FreeNode(memIt); Compact(0); return true; } void CudaAllocBuckets::Clear() { Compact(_allocated); } void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) { if(memIt->address->first) hipFree(memIt->address->first); int bucket = memIt->bucket; size_t commitSize = (bucket < NumBuckets) ? 
BucketSizes[bucket] : 0; _addressMap.erase(memIt->address); if(memIt->priority != _priorityMap.end()) _priorityMap.erase(memIt->priority); else _committed -= commitSize; _allocated -= commitSize; _memLists[bucket].erase(memIt); assert(SanityCheck()); } void CudaAllocBuckets::Compact(size_t extra) { while(_allocated + extra > _capacity && _allocated > _committed) { // Walk the priority queue from beginning to end removing nodes. MemList::iterator memIt = _priorityMap.begin()->second; FreeNode(memIt); } } // Exponentially spaced buckets. const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = { 256, 512, 1024, 2048, 4096, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, 131072, 174848, 218624, 262144, 349696, 436992, 524288, 655360, 786432, 917504, 1048576, 1310720, 1572864, 1835008, 2097152, 2516736, 2936064, 3355648, 3774976, 4194304, 4893440, 5592576, 6291456, 6990592, 7689728, 8388608, 9786880, 11184896, 12582912, 13981184, 15379200, 16777216, 18874368, 20971520, 23068672, 25165824, 27262976, 29360128, 31457280, 33554432, 36910080, 40265472, 43620864, 46976256, 50331648, 53687296, 57042688, 60398080, 63753472, 67108864, 72701440, 78293760, 83886080, 89478656, 95070976, 100663296, 106255872, 111848192, 117440512, 123033088, 128625408, 134217728, 143804928, 153391872, 162978816, 172565760, 182152704, 191739648, 201326592, 210913792, 220500736 }; int CudaAllocBuckets::LocateBucket(size_t size) const { if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1]) return NumBuckets; return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) - BucketSizes); } } // namespace mgpu
60703cb4fda67605602b68618f11cf51aa03815d.cu
/****************************************************************************** * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * * Code and text by Sean Baxter, NVIDIA Research * See http://nvlabs.github.io/moderngpu for repository and documentation. 
* ******************************************************************************/ #include "util/mgpucontext.h" namespace mgpu { //////////////////////////////////////////////////////////////////////////////// // CudaTimer void CudaTimer::Start() { cudaEventRecord(start); cudaDeviceSynchronize(); } double CudaTimer::Split() { cudaEventRecord(end); cudaDeviceSynchronize(); float t; cudaEventElapsedTime(&t, start, end); start.Swap(end); return (t / 1000.0); } double CudaTimer::Throughput(int count, int numIterations) { double elapsed = Split(); return (double)numIterations * count / elapsed; } //////////////////////////////////////////////////////////////////////////////// // CudaDevice __global__ void KernelVersionShim() { } struct DeviceGroup { int numCudaDevices; CudaDevice** cudaDevices; DeviceGroup() { numCudaDevices = -1; cudaDevices = 0; } int GetDeviceCount() { if(-1 == numCudaDevices) { cudaError_t error = cudaGetDeviceCount(&numCudaDevices); if(cudaSuccess != error || numCudaDevices <= 0) { fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n"); exit(0); } cudaDevices = new CudaDevice*[numCudaDevices]; memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices); } return numCudaDevices; } CudaDevice* GetByOrdinal(int ordinal) { if(ordinal >= GetDeviceCount()) return 0; if(!cudaDevices[ordinal]) { // Retrieve the device properties. CudaDevice* device = cudaDevices[ordinal] = new CudaDevice; device->_ordinal = ordinal; cudaError_t error = cudaGetDeviceProperties(&device->_prop, ordinal); if(cudaSuccess != error) { fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal); exit(0); } // Get the compiler version for this device. 
//cudaSetDevice(ordinal); // don't create new context cudaFuncAttributes attr; error = cudaFuncGetAttributes(&attr, KernelVersionShim); if(cudaSuccess == error) device->_ptxVersion = 10 * attr.ptxVersion; else { printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE" " %d\n", ordinal); // The module wasn't compiled with support for this device. device->_ptxVersion = 0; } } return cudaDevices[ordinal]; } ~DeviceGroup() { if(cudaDevices) { for(int i = 0; i < numCudaDevices; ++i) delete cudaDevices[i]; delete [] cudaDevices; } cudaDeviceReset(); } }; std::auto_ptr<DeviceGroup> deviceGroup; int CudaDevice::DeviceCount() { if(!deviceGroup.get()) deviceGroup.reset(new DeviceGroup); return deviceGroup->GetDeviceCount(); } CudaDevice& CudaDevice::ByOrdinal(int ordinal) { if(ordinal < 0 || ordinal >= DeviceCount()) { fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal); exit(0); } return *deviceGroup->GetByOrdinal(ordinal); } CudaDevice& CudaDevice::Selected() { int ordinal; cudaError_t error = cudaGetDevice(&ordinal); if(cudaSuccess != error) { fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n"); exit(0); } return ByOrdinal(ordinal); } void CudaDevice::SetActive() { cudaError_t error = cudaSetDevice(_ordinal); if(cudaSuccess != error) { fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal); exit(0); } } std::string CudaDevice::DeviceString() const { size_t freeMem, totalMem; cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem); if(cudaSuccess != error) { fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n", _ordinal); exit(0); } double memBandwidth = (_prop.memoryClockRate * 1000.0) * (_prop.memoryBusWidth / 8 * 2) / 1.0e9; std::string s = stringprintf( "%s : %8.3lf Mhz (Ordinal %d)\n" "%d SMs enabled. 
Compute Capability sm_%d%d\n" "FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n" "Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n" "ECC %s\n\n", _prop.name, _prop.clockRate / 1000.0, _ordinal, _prop.multiProcessorCount, _prop.major, _prop.minor, (int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*), _prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth, _prop.ECCEnabled ? "Enabled" : "Disabled"); return s; } //////////////////////////////////////////////////////////////////////////////// // CudaContext struct ContextGroup { CudaContext** standardContexts; int numDevices; ContextGroup() { numDevices = CudaDevice::DeviceCount(); standardContexts = new CudaContext*[numDevices]; memset(standardContexts, 0, sizeof(CudaContext*) * numDevices); } CudaContext* GetByOrdinal(int ordinal) { if(!standardContexts[ordinal]) { CudaDevice& device = CudaDevice::ByOrdinal(ordinal); standardContexts[ordinal] = new CudaContext(device, false, true); } return standardContexts[ordinal]; } ~ContextGroup() { if(standardContexts) { for(int i = 0; i < numDevices; ++i) delete standardContexts[i]; delete [] standardContexts; } } }; std::auto_ptr<ContextGroup> contextGroup; CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) : _event(cudaEventDisableTiming /*| cudaEventBlockingSync */), _stream(0), _noRefCount(standard), _pageLocked(0) { // Create an allocator. if(standard) _alloc.reset(new CudaAllocSimple(device)); else _alloc = CreateDefaultAlloc(device); if(newStream) cudaStreamCreate(&_stream); _ownStream = newStream; // Allocate 4KB of page-locked memory. cudaError_t error; // error = cudaMallocHost((void**)&_pageLocked, 4096); // Allocate an auxiliary stream. 
error = cudaStreamCreate(&_auxStream); } CudaContext::~CudaContext() { if(_pageLocked) cudaFreeHost(_pageLocked); if(_ownStream && _stream) cudaStreamDestroy(_stream); if(_auxStream) cudaStreamDestroy(_auxStream); } AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) { intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device)); size_t freeMem, totalMem; cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem); if(cudaSuccess != error) { fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n", device.Ordinal()); exit(0); } // Maintain a buffer of 128MB with max objects of 64MB. alloc->SetCapacity(128<< 20, 64<< 20); return AllocPtr(alloc.get()); } CudaContext& CudaContext::StandardContext(int ordinal) { bool setActive = -1 != ordinal; if(-1 == ordinal) { cudaError_t error = cudaGetDevice(&ordinal); if(cudaSuccess != error) { fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n"); exit(0); } } int numDevices = CudaDevice::DeviceCount(); if(ordinal < 0 || ordinal >= numDevices) { fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal); exit(0); } if(!contextGroup.get()) contextGroup.reset(new ContextGroup); CudaContext& context = //*contextGroup->standardContexts[ordinal]; *contextGroup->GetByOrdinal(ordinal); if(!context.PTXVersion()) { fprintf(stderr, "This CUDA executable was not compiled with support" " for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10); exit(0); } if(setActive) context.SetActive(); return context; } ContextPtr CreateCudaDevice(int ordinal) { CudaDevice& device = CudaDevice::ByOrdinal(ordinal); ContextPtr context(new CudaContext(device, false, false)); return context; } ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) { int ordinal = 0; if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) { fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n"); exit(0); } ContextPtr context = CreateCudaDevice(ordinal); if(!context->PTXVersion()) { fprintf(stderr, "This CUDA 
executable was not compiled with support" " for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10); exit(0); } context->SetActive(); if(printInfo) printf("%s\n", context->Device().DeviceString().c_str()); return context; } ContextPtr CreateCudaDeviceStream(int ordinal) { ContextPtr context(new CudaContext( CudaDevice::ByOrdinal(ordinal), true, false)); return context; } ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) { int ordinal = 0; if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) { fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n"); exit(0); } ContextPtr context = CreateCudaDeviceStream(ordinal); if(!context->PTXVersion()) { fprintf(stderr, "This CUDA executable was not compiled with support" " for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10); exit(0); } context->SetActive(); if(printInfo) printf("%s\n", context->Device().DeviceString().c_str()); return context; } ContextPtr CreateCudaDeviceAttachStream(int ordinal, cudaStream_t stream) { ContextPtr context(new CudaContext( CudaDevice::ByOrdinal(ordinal), false, false)); context->_stream = stream; return context; } ContextPtr CreateCudaDeviceAttachStream(cudaStream_t stream) { int ordinal; cudaGetDevice(&ordinal); return CreateCudaDeviceAttachStream(ordinal, stream); } //////////////////////////////////////////////////////////////////////////////// // CudaAllocSimple cudaError_t CudaAllocSimple::Malloc(size_t size, void** p) { cudaError_t error = cudaSuccess; *p = 0; if(size) error = cudaMalloc(p, size); if(cudaSuccess != error) { printf("CUDA MALLOC ERROR %d\n", error); exit(0); } return error; } bool CudaAllocSimple::Free(void* p) { cudaError_t error = cudaSuccess; if(p) error = cudaFree(p); return cudaSuccess == error; } //////////////////////////////////////////////////////////////////////////////// // CudaAllocBuckets CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) { _maxObjectSize = _capacity = _allocated = 
_committed = 0; _counter = 0; } CudaAllocBuckets::~CudaAllocBuckets() { SetCapacity(0, 0); assert(!_allocated); } bool CudaAllocBuckets::SanityCheck() const { // Iterate through all allocated objects and verify sizes. size_t allocatedCount = 0, committedCount = 0; for(AddressMap::const_iterator i = _addressMap.begin(); i != _addressMap.end(); ++i) { int bucket = i->second->bucket; size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0; allocatedCount += size; if(i->second->priority == _priorityMap.end()) committedCount += size; } return allocatedCount == _allocated && committedCount == _committed; } cudaError_t CudaAllocBuckets::Malloc(size_t size, void** p) { // Locate the bucket index and adjust the size of the allocation to the // bucket size. size_t allocSize = size; size_t commitSize = 0; int bucket = LocateBucket(size); if(bucket < NumBuckets) allocSize = commitSize = BucketSizes[bucket]; // Peel off an already-allocated node and reuse it. MemList& list = _memLists[bucket]; if(list.size() && list.front().priority != _priorityMap.end()) { MemList::iterator memIt = list.begin(); _priorityMap.erase(memIt->priority); memIt->priority = _priorityMap.end(); list.splice(list.end(), list, memIt); _committed += commitSize; *p = memIt->address->first; return cudaSuccess; } // Shrink if this allocation would put us over the limit. 
Compact(commitSize); cudaError_t error = cudaSuccess; *p = 0; if(size) error = cudaMalloc(p, allocSize); while((cudaErrorMemoryAllocation == error) && (_committed < _allocated)) { SetCapacity(_capacity - _capacity / 10, _maxObjectSize); error = cudaMalloc(p, size); } if(cudaSuccess != error) return error; MemList::iterator memIt = _memLists[bucket].insert(_memLists[bucket].end(), MemNode()); memIt->bucket = bucket; memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first; memIt->priority = _priorityMap.end(); _allocated += commitSize; _committed += commitSize; assert(SanityCheck()); return cudaSuccess; } bool CudaAllocBuckets::Free(void* p) { AddressMap::iterator it = _addressMap.find(p); if(it == _addressMap.end()) { // If the pointer was not found in the address map, cudaFree it anyways // but return false. if(p) cudaFree(p); return false; } // Because we're freeing a page, it had better not be in the priority queue. MemList::iterator memIt = it->second; assert(memIt->priority == _priorityMap.end()); // Always free allocations larger than the largest bucket it->second->priority = _priorityMap.insert( std::make_pair(_counter++ - memIt->bucket, memIt)); // Freed nodes are moved to the front, committed nodes are moved to the // end. int bucket = memIt->bucket; size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0; MemList& list = _memLists[bucket]; list.splice(list.begin(), list, memIt); _committed -= commitSize; // Delete data that's not cached. if(NumBuckets == bucket) FreeNode(memIt); Compact(0); return true; } void CudaAllocBuckets::Clear() { Compact(_allocated); } void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) { if(memIt->address->first) cudaFree(memIt->address->first); int bucket = memIt->bucket; size_t commitSize = (bucket < NumBuckets) ? 
BucketSizes[bucket] : 0; _addressMap.erase(memIt->address); if(memIt->priority != _priorityMap.end()) _priorityMap.erase(memIt->priority); else _committed -= commitSize; _allocated -= commitSize; _memLists[bucket].erase(memIt); assert(SanityCheck()); } void CudaAllocBuckets::Compact(size_t extra) { while(_allocated + extra > _capacity && _allocated > _committed) { // Walk the priority queue from beginning to end removing nodes. MemList::iterator memIt = _priorityMap.begin()->second; FreeNode(memIt); } } // Exponentially spaced buckets. const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = { 256, 512, 1024, 2048, 4096, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, 131072, 174848, 218624, 262144, 349696, 436992, 524288, 655360, 786432, 917504, 1048576, 1310720, 1572864, 1835008, 2097152, 2516736, 2936064, 3355648, 3774976, 4194304, 4893440, 5592576, 6291456, 6990592, 7689728, 8388608, 9786880, 11184896, 12582912, 13981184, 15379200, 16777216, 18874368, 20971520, 23068672, 25165824, 27262976, 29360128, 31457280, 33554432, 36910080, 40265472, 43620864, 46976256, 50331648, 53687296, 57042688, 60398080, 63753472, 67108864, 72701440, 78293760, 83886080, 89478656, 95070976, 100663296, 106255872, 111848192, 117440512, 123033088, 128625408, 134217728, 143804928, 153391872, 162978816, 172565760, 182152704, 191739648, 201326592, 210913792, 220500736 }; int CudaAllocBuckets::LocateBucket(size_t size) const { if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1]) return NumBuckets; return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) - BucketSizes); } } // namespace mgpu
40e15d96c1be3ccf9613e4b46d7412768dd6ea1e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/mgbenchUtilFunctions.h" #define SIZE 1000 #define GPU_DEVICE 0 #define PERCENT_DIFF_ERROR_THRESHOLD 0.01 __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_0(float * a,float * c_cpu,float * c_gpu,float * b); void init(float *a, float *b, float *c_cpu, float *c_gpu) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation c_cpu\n"); acc_present_or_create((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation c_gpu\n"); acc_present_or_create((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c_cpu\n"); acc_pcopyin((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c_gpu\n"); acc_pcopyin((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); acc_pcopyin((void*)a,(999999+0)*sizeof(float )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: 
%d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_0), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)c_cpu), (float *)acc_deviceptr((void*)c_gpu), (float *)acc_deviceptr((void*)b)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout c_cpu\n"); acc_copyout_and_keep((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout c_gpu\n"); acc_copyout_and_keep((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_1(float * a,float * b,float * c); void sum_GPU(float *a, float *b, float *c) { int i, j; #pragma omp target device (GPU_DEVICE) #pragma omp target map(to: a[0:SIZE*SIZE], b[0:SIZE*SIZE]) map(tofrom: c[0:SIZE*SIZE]) { #pragma omp parallel for collapse(2) ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation c\n"); acc_present_or_create((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); acc_pcopyin((void*)a,(999999+0)*sizeof(float )); 
ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c\n"); acc_pcopyin((void*)c,(999999+0)*sizeof(float )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_1), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)b), (float *)acc_deviceptr((void*)c)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout c\n"); acc_copyout_and_keep((void*)c,(999999+0)*sizeof(float )); } } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_2(float * a,float * b,float * c); void sum_CPU(float *a, float *b, float *c) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation c\n"); acc_present_or_create((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c\n"); acc_pcopyin((void*)c,(999999+0)*sizeof(float )); 
ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); acc_pcopyin((void*)a,(999999+0)*sizeof(float )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_2), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)b), (float *)acc_deviceptr((void*)c)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout c\n"); acc_copyout_and_keep((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_3(float * b_gpu,float * b_cpu,int fail); void compareResults(float *b_cpu, float *b_gpu) { int i, j, fail; fail = 0; ipmacc_prompt((char*)"IPMACC: memory allocation b_cpu\n"); acc_present_or_create((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b_gpu\n"); acc_present_or_create((void*)b_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b_cpu\n"); acc_pcopyin((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b_gpu\n"); 
acc_pcopyin((void*)b_gpu,(999999+0)*sizeof(float )); { /* kernel call statement [3, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 3 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_3), dim3((((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (float *)acc_deviceptr((void*)b_gpu), (float *)acc_deviceptr((void*)b_cpu), fail); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout b_cpu\n"); acc_copyout_and_keep((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b_gpu\n"); acc_copyout_and_keep((void*)b_gpu,(999999+0)*sizeof(float )); printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; float *a, *b, *c_cpu, *c_gpu; a = (float*)malloc(sizeof(float) * SIZE * SIZE); b = (float*)malloc(sizeof(float) * SIZE * SIZE); c_cpu = (float*)malloc(sizeof(float) * SIZE * SIZE); c_gpu = (float*)malloc(sizeof(float) * SIZE * SIZE); fprintf(stdout, "<< Matrix Sum >>\n"); init(a, b, c_cpu, c_gpu); t_start = rtclock(); sum_GPU(a, b, c_gpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); sum_CPU(a, b, c_cpu); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(c_cpu, c_gpu); free(a); free(b); free(c_cpu); free(c_gpu); return 0; } __device__ float __accelerator_absVal( float a ) { if ( a < 0 ) { return ( a * -1) ; } else { return a ; } } __device__ float __accelerator_percentDiff( 
double val1 , double val2 ) { if ( ( __accelerator_absVal( val1 ) < 0.01) && ( __accelerator_absVal( val2 ) < 0.01) ) { return 0.0f ; } else { return 100.0f * ( __accelerator_absVal( __accelerator_absVal( val1 - val2 ) / __accelerator_absVal( val1 + 0.00000001f ) ) ) ; } } __global__ void __generated_kernel_region_0(float * a,float * c_cpu,float * c_gpu,float * b){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { a [i * SIZE + j] = (float)i + j; b [i * SIZE + j] = (float)i + j; c_cpu [i * SIZE + j] = 0.0f; c_gpu [i * SIZE + j] = 0.0f; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(float * a,float * b,float * c){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { c [i * SIZE + j] = a [i * SIZE + j] + b [i * SIZE + j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(float * a,float * b,float * c){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { c [i * SIZE + j] = a [i * SIZE + j] + b [i * SIZE + j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_3(float * b_gpu,float * b_cpu,int fail){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < 
SIZE) { for(j = 0; j < SIZE; j++) { if (__accelerator_percentDiff(b_cpu [i * SIZE + j], b_gpu [i * SIZE + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } } } } //append writeback of scalar variables }
40e15d96c1be3ccf9613e4b46d7412768dd6ea1e.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/mgbenchUtilFunctions.h" #define SIZE 1000 #define GPU_DEVICE 0 #define PERCENT_DIFF_ERROR_THRESHOLD 0.01 __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_0(float * a,float * c_cpu,float * c_gpu,float * b); void init(float *a, float *b, float *c_cpu, float *c_gpu) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation c_cpu\n"); acc_present_or_create((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation c_gpu\n"); acc_present_or_create((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c_cpu\n"); acc_pcopyin((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c_gpu\n"); acc_pcopyin((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); acc_pcopyin((void*)a,(999999+0)*sizeof(float )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); 
__generated_kernel_region_0<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)c_cpu), (float *)acc_deviceptr((void*)c_gpu), (float *)acc_deviceptr((void*)b)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout c_cpu\n"); acc_copyout_and_keep((void*)c_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout c_gpu\n"); acc_copyout_and_keep((void*)c_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_1(float * a,float * b,float * c); void sum_GPU(float *a, float *b, float *c) { int i, j; #pragma omp target device (GPU_DEVICE) #pragma omp target map(to: a[0:SIZE*SIZE], b[0:SIZE*SIZE]) map(tofrom: c[0:SIZE*SIZE]) { #pragma omp parallel for collapse(2) ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation c\n"); acc_present_or_create((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); acc_pcopyin((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory 
copyin c\n"); acc_pcopyin((void*)c,(999999+0)*sizeof(float )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_1<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)b), (float *)acc_deviceptr((void*)c)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout c\n"); acc_copyout_and_keep((void*)c,(999999+0)*sizeof(float )); } } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_2(float * a,float * b,float * c); void sum_CPU(float *a, float *b, float *c) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation c\n"); acc_present_or_create((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b\n"); acc_present_or_create((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation a\n"); acc_present_or_create((void*)a,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin c\n"); acc_pcopyin((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b\n"); acc_pcopyin((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin a\n"); 
acc_pcopyin((void*)a,(999999+0)*sizeof(float )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_2<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (float *)acc_deviceptr((void*)a), (float *)acc_deviceptr((void*)b), (float *)acc_deviceptr((void*)c)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout c\n"); acc_copyout_and_keep((void*)c,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b\n"); acc_copyout_and_keep((void*)b,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout a\n"); acc_copyout_and_keep((void*)a,(999999+0)*sizeof(float )); } __device__ float __accelerator_absVal( float a ); __device__ float __accelerator_percentDiff( double val1 , double val2 ); __global__ void __generated_kernel_region_3(float * b_gpu,float * b_cpu,int fail); void compareResults(float *b_cpu, float *b_gpu) { int i, j, fail; fail = 0; ipmacc_prompt((char*)"IPMACC: memory allocation b_cpu\n"); acc_present_or_create((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory allocation b_gpu\n"); acc_present_or_create((void*)b_gpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b_cpu\n"); acc_pcopyin((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyin b_gpu\n"); acc_pcopyin((void*)b_gpu,(999999+0)*sizeof(float )); { /* kernel call statement [3, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 3 > gridDim: %d\tblockDim: 
%d\n",(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_3<<<(((abs((int)((SIZE))-(0+0)))/(1)))/256+(((((abs((int)((SIZE))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (float *)acc_deviceptr((void*)b_gpu), (float *)acc_deviceptr((void*)b_cpu), fail); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout b_cpu\n"); acc_copyout_and_keep((void*)b_cpu,(999999+0)*sizeof(float )); ipmacc_prompt((char*)"IPMACC: memory copyout b_gpu\n"); acc_copyout_and_keep((void*)b_gpu,(999999+0)*sizeof(float )); printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; float *a, *b, *c_cpu, *c_gpu; a = (float*)malloc(sizeof(float) * SIZE * SIZE); b = (float*)malloc(sizeof(float) * SIZE * SIZE); c_cpu = (float*)malloc(sizeof(float) * SIZE * SIZE); c_gpu = (float*)malloc(sizeof(float) * SIZE * SIZE); fprintf(stdout, "<< Matrix Sum >>\n"); init(a, b, c_cpu, c_gpu); t_start = rtclock(); sum_GPU(a, b, c_gpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); sum_CPU(a, b, c_cpu); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(c_cpu, c_gpu); free(a); free(b); free(c_cpu); free(c_gpu); return 0; } __device__ float __accelerator_absVal( float a ) { if ( a < 0 ) { return ( a * -1) ; } else { return a ; } } __device__ float __accelerator_percentDiff( double val1 , double val2 ) { if ( ( __accelerator_absVal( val1 ) < 0.01) && ( __accelerator_absVal( val2 ) < 0.01) ) { return 0.0f ; } else { return 100.0f * ( __accelerator_absVal( __accelerator_absVal( val1 - 
val2 ) / __accelerator_absVal( val1 + 0.00000001f ) ) ) ; } } __global__ void __generated_kernel_region_0(float * a,float * c_cpu,float * c_gpu,float * b){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { a [i * SIZE + j] = (float)i + j; b [i * SIZE + j] = (float)i + j; c_cpu [i * SIZE + j] = 0.0f; c_gpu [i * SIZE + j] = 0.0f; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(float * a,float * b,float * c){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { c [i * SIZE + j] = a [i * SIZE + j] + b [i * SIZE + j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(float * a,float * b,float * c){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; ++j) { c [i * SIZE + j] = a [i * SIZE + j] + b [i * SIZE + j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_3(float * b_gpu,float * b_cpu,int fail){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < SIZE) { for(j = 0; j < SIZE; j++) { if (__accelerator_percentDiff(b_cpu [i * SIZE + j], b_gpu [i * SIZE + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } } } } //append writeback of scalar variables }
4192ee8cdf8f6ff2afd101bbe34e490dfe499f80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dedisperse.h" #include "dedisperse_kernel.cuh" // Kernel tuning parameters #define DEDISP_BLOCK_SIZE 256 #define DEDISP_BLOCK_SAMPS 8 #define DEDISP_SAMPS_PER_THREAD 2 // 4 is better for Fermi? /* * Helper functions */ bool check_use_texture_mem() { // Decides based on GPU architecture int device_idx; hipGetDevice(&device_idx); hipDeviceProp_t device_props; hipGetDeviceProperties(&device_props, device_idx); // Fermi runs worse with texture mem bool use_texture_mem = (device_props.major < 2); return use_texture_mem; } void copy_delay_table( const void* src, size_t count, size_t offset, hipStream_t stream) { hipMemcpyToSymbolAsync(c_delay_table, src, count, offset, hipMemcpyDeviceToDevice, stream); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if( error != hipSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } } void copy_killmask( const void* src, size_t count, size_t offset, hipStream_t stream) { hipMemcpyToSymbolAsync(c_killmask, src, count, offset, hipMemcpyDeviceToDevice, stream); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if( error != hipSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } } /* * dedisperse routine */ bool dedisperse(const dedisp_word* d_in, dedisp_size in_stride, dedisp_size nsamps, dedisp_size in_nbits, dedisp_size nchans, dedisp_size chan_stride, const dedisp_float* d_dm_list, dedisp_size dm_count, dedisp_size dm_stride, dedisp_byte* d_out, dedisp_size out_stride, dedisp_size out_nbits, dedisp_size batch_size, dedisp_size batch_in_stride, dedisp_size batch_dm_stride, dedisp_size batch_chan_stride, dedisp_size batch_out_stride) { enum { BITS_PER_BYTE = 8, BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte), BLOCK_DIM_X = DEDISP_BLOCK_SAMPS, BLOCK_DIM_Y = DEDISP_BLOCK_SIZE / DEDISP_BLOCK_SAMPS, MAX_CUDA_GRID_SIZE_X = 65535, MAX_CUDA_GRID_SIZE_Y = 65535, MAX_CUDA_1D_TEXTURE_SIZE = (1<<27) }; // Initialise 
texture memory if necessary // -------------------------------------- // Determine whether we should use texture memory bool use_texture_mem = check_use_texture_mem(); if( use_texture_mem ) { dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits; dedisp_size nchan_words = nchans / chans_per_word; dedisp_size input_words = in_stride * nchan_words; // Check the texture size limit if( input_words > MAX_CUDA_1D_TEXTURE_SIZE ) { return false; } // Bind the texture memory hipChannelFormatDesc channel_desc = hipCreateChannelDesc<dedisp_word>(); hipBindTexture(0, t_in, d_in, channel_desc, input_words * sizeof(dedisp_word)); #ifdef DEDISP_DEBUG hipError_t cuda_error = hipGetLastError(); if( cuda_error != hipSuccess ) { return false; } #endif // DEDISP_DEBUG } // -------------------------------------- // Define thread decomposition // Note: Block dimensions x and y represent time samples and DMs respectively dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y); // Note: Grid dimension x represents time samples. Dimension y represents // DMs and batch jobs flattened together. 
// Divide and round up dedisp_size nsamp_blocks = (nsamps - 1) / ((dedisp_size)DEDISP_SAMPS_PER_THREAD*block.x) + 1; dedisp_size ndm_blocks = (dm_count - 1) / (dedisp_size)block.y + 1; // Constrain the grid size to the maximum allowed // TODO: Consider cropping the batch size dimension instead and looping over it // inside the kernel ndm_blocks = min((unsigned int)ndm_blocks, (unsigned int)(MAX_CUDA_GRID_SIZE_Y/batch_size)); // Note: We combine the DM and batch dimensions into one dim3 grid(nsamp_blocks, ndm_blocks * batch_size); // Divide and round up dedisp_size nsamps_reduced = (nsamps - 1) / DEDISP_SAMPS_PER_THREAD + 1; hipStream_t stream = 0; // Execute the kernel #define DEDISP_CALL_KERNEL(NBITS, USE_TEXTURE_MEM) \ hipLaunchKernelGGL(( dedisperse_kernel<NBITS,DEDISP_SAMPS_PER_THREAD,BLOCK_DIM_X, \ BLOCK_DIM_Y,USE_TEXTURE_MEM>) \ , dim3(grid), dim3(block), 0, stream, d_in, \ nsamps, \ nsamps_reduced, \ nsamp_blocks, \ in_stride, \ dm_count, \ dm_stride, \ ndm_blocks, \ nchans, \ chan_stride, \ d_out, \ out_nbits, \ out_stride, \ d_dm_list, \ batch_in_stride, \ batch_dm_stride, \ batch_chan_stride, \ batch_out_stride) // Note: Here we dispatch dynamically on nbits for supported values if( use_texture_mem ) { switch( in_nbits ) { case 1: DEDISP_CALL_KERNEL(1,true); break; case 2: DEDISP_CALL_KERNEL(2,true); break; case 4: DEDISP_CALL_KERNEL(4,true); break; case 8: DEDISP_CALL_KERNEL(8,true); break; case 16: DEDISP_CALL_KERNEL(16,true); break; case 32: DEDISP_CALL_KERNEL(32,true); break; default: /* should never be reached */ break; } } else { switch( in_nbits ) { case 1: DEDISP_CALL_KERNEL(1,false); break; case 2: DEDISP_CALL_KERNEL(2,false); break; case 4: DEDISP_CALL_KERNEL(4,false); break; case 8: DEDISP_CALL_KERNEL(8,false); break; case 16: DEDISP_CALL_KERNEL(16,false); break; case 32: DEDISP_CALL_KERNEL(32,false); break; default: /* should never be reached */ break; } } #undef DEDISP_CALL_KERNEL // Check for kernel errors #ifdef DEDISP_DEBUG 
//hipStreamSynchronize(stream); hipDeviceSynchronize(); hipError_t cuda_error = hipGetLastError(); if( cuda_error != hipSuccess ) { return false; } #endif // DEDISP_DEBUG return true; }
4192ee8cdf8f6ff2afd101bbe34e490dfe499f80.cu
#include "dedisperse.h" #include "dedisperse_kernel.cuh" // Kernel tuning parameters #define DEDISP_BLOCK_SIZE 256 #define DEDISP_BLOCK_SAMPS 8 #define DEDISP_SAMPS_PER_THREAD 2 // 4 is better for Fermi? /* * Helper functions */ bool check_use_texture_mem() { // Decides based on GPU architecture int device_idx; cudaGetDevice(&device_idx); cudaDeviceProp device_props; cudaGetDeviceProperties(&device_props, device_idx); // Fermi runs worse with texture mem bool use_texture_mem = (device_props.major < 2); return use_texture_mem; } void copy_delay_table( const void* src, size_t count, size_t offset, cudaStream_t stream) { cudaMemcpyToSymbolAsync(c_delay_table, src, count, offset, cudaMemcpyDeviceToDevice, stream); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } } void copy_killmask( const void* src, size_t count, size_t offset, cudaStream_t stream) { cudaMemcpyToSymbolAsync(c_killmask, src, count, offset, cudaMemcpyDeviceToDevice, stream); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } } /* * dedisperse routine */ bool dedisperse(const dedisp_word* d_in, dedisp_size in_stride, dedisp_size nsamps, dedisp_size in_nbits, dedisp_size nchans, dedisp_size chan_stride, const dedisp_float* d_dm_list, dedisp_size dm_count, dedisp_size dm_stride, dedisp_byte* d_out, dedisp_size out_stride, dedisp_size out_nbits, dedisp_size batch_size, dedisp_size batch_in_stride, dedisp_size batch_dm_stride, dedisp_size batch_chan_stride, dedisp_size batch_out_stride) { enum { BITS_PER_BYTE = 8, BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte), BLOCK_DIM_X = DEDISP_BLOCK_SAMPS, BLOCK_DIM_Y = DEDISP_BLOCK_SIZE / DEDISP_BLOCK_SAMPS, MAX_CUDA_GRID_SIZE_X = 65535, MAX_CUDA_GRID_SIZE_Y = 65535, MAX_CUDA_1D_TEXTURE_SIZE = (1<<27) }; // Initialise texture memory if necessary // -------------------------------------- // 
Determine whether we should use texture memory bool use_texture_mem = check_use_texture_mem(); if( use_texture_mem ) { dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits; dedisp_size nchan_words = nchans / chans_per_word; dedisp_size input_words = in_stride * nchan_words; // Check the texture size limit if( input_words > MAX_CUDA_1D_TEXTURE_SIZE ) { return false; } // Bind the texture memory cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<dedisp_word>(); cudaBindTexture(0, t_in, d_in, channel_desc, input_words * sizeof(dedisp_word)); #ifdef DEDISP_DEBUG cudaError_t cuda_error = cudaGetLastError(); if( cuda_error != cudaSuccess ) { return false; } #endif // DEDISP_DEBUG } // -------------------------------------- // Define thread decomposition // Note: Block dimensions x and y represent time samples and DMs respectively dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y); // Note: Grid dimension x represents time samples. Dimension y represents // DMs and batch jobs flattened together. 
// Divide and round up dedisp_size nsamp_blocks = (nsamps - 1) / ((dedisp_size)DEDISP_SAMPS_PER_THREAD*block.x) + 1; dedisp_size ndm_blocks = (dm_count - 1) / (dedisp_size)block.y + 1; // Constrain the grid size to the maximum allowed // TODO: Consider cropping the batch size dimension instead and looping over it // inside the kernel ndm_blocks = min((unsigned int)ndm_blocks, (unsigned int)(MAX_CUDA_GRID_SIZE_Y/batch_size)); // Note: We combine the DM and batch dimensions into one dim3 grid(nsamp_blocks, ndm_blocks * batch_size); // Divide and round up dedisp_size nsamps_reduced = (nsamps - 1) / DEDISP_SAMPS_PER_THREAD + 1; cudaStream_t stream = 0; // Execute the kernel #define DEDISP_CALL_KERNEL(NBITS, USE_TEXTURE_MEM) \ dedisperse_kernel<NBITS,DEDISP_SAMPS_PER_THREAD,BLOCK_DIM_X, \ BLOCK_DIM_Y,USE_TEXTURE_MEM> \ <<<grid, block, 0, stream>>>(d_in, \ nsamps, \ nsamps_reduced, \ nsamp_blocks, \ in_stride, \ dm_count, \ dm_stride, \ ndm_blocks, \ nchans, \ chan_stride, \ d_out, \ out_nbits, \ out_stride, \ d_dm_list, \ batch_in_stride, \ batch_dm_stride, \ batch_chan_stride, \ batch_out_stride) // Note: Here we dispatch dynamically on nbits for supported values if( use_texture_mem ) { switch( in_nbits ) { case 1: DEDISP_CALL_KERNEL(1,true); break; case 2: DEDISP_CALL_KERNEL(2,true); break; case 4: DEDISP_CALL_KERNEL(4,true); break; case 8: DEDISP_CALL_KERNEL(8,true); break; case 16: DEDISP_CALL_KERNEL(16,true); break; case 32: DEDISP_CALL_KERNEL(32,true); break; default: /* should never be reached */ break; } } else { switch( in_nbits ) { case 1: DEDISP_CALL_KERNEL(1,false); break; case 2: DEDISP_CALL_KERNEL(2,false); break; case 4: DEDISP_CALL_KERNEL(4,false); break; case 8: DEDISP_CALL_KERNEL(8,false); break; case 16: DEDISP_CALL_KERNEL(16,false); break; case 32: DEDISP_CALL_KERNEL(32,false); break; default: /* should never be reached */ break; } } #undef DEDISP_CALL_KERNEL // Check for kernel errors #ifdef DEDISP_DEBUG //cudaStreamSynchronize(stream); 
cudaDeviceSynchronize(); cudaError_t cuda_error = cudaGetLastError(); if( cuda_error != cudaSuccess ) { return false; } #endif // DEDISP_DEBUG return true; }
43d297340e250477bb900293a96609131f5022d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = x[tid % c]; } }
43d297340e250477bb900293a96609131f5022d9.cu
#include "includes.h" extern "C" { } __global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < len) { y[tid] = x[tid % c]; } }
ee78b4a0b81e665ad5eb77e99e7215606a489682.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <fstream> #include <iostream> #include <sstream> #include <vector> #include <cmath> #include <algorithm> #include <chrono> using namespace std; __global__ void setClusters(float* pointVec, int* cur_cluster, float* means, float* sums, int vecSize, int k, int dimensions, int* counter, int* done) { extern __shared__ float shared_means[]; // idx corresponds to point id int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= vecSize) return; if (threadIdx.x < k * dimensions) { // Put means into shared memory to reduce time to take from global memory shared_means[threadIdx.x] = means[threadIdx.x]; } __syncthreads(); float minDist = FLT_MAX; int bestCluster = INT_MAX; float distance; // Find best cluster for each point for (int i = 0; i < k; i++) { distance = 0; for (int j = 0; j < dimensions; j++) { distance += (pointVec[idx + vecSize * j] - shared_means[i + k * j]) * (pointVec[idx + vecSize * j] - shared_means[i + k * j]); } if (distance < minDist) { minDist = distance; bestCluster = i; } } // If cluster changed, update cluster id list and set done to false (1) if (cur_cluster[idx] != bestCluster) { cur_cluster[idx] = bestCluster; done[0] = 1; } // Atomically add sums and counter with cluster for (int i = 0; i < dimensions; i++) { atomicAdd(&sums[bestCluster + k * i], pointVec[idx + vecSize * i]); } atomicAdd(&counter[bestCluster], 1); } __global__ void getNewMeans(float* means, float* sums, int* counter, int k, int dimensions) { // Get new mean for each cluster int idx = blockIdx.x * blockDim.x + threadIdx.x; int count = (counter[idx] != 0) ? 
counter[idx] : 1; for (int i = 0; i < dimensions; i++) { means[idx + k * i] = sums[idx + k * i] / count; } } int main(int argc, char* argv[]) { cout << "---CUDA Atomic---" << endl; int k = 5; int iters = 300; int dimensions = 3; int vecSize = 100000; string filename = "test3d100000.csv"; ifstream infile(filename.c_str()); string line; if (!infile.is_open()) { cout << "Error: Failed to open file" << endl; return 1; } vector<float> h_pointVec(dimensions * vecSize); float val; char eater; int curPoint = 0; int offset; // Add point to vector while (getline(infile, line)) { stringstream is(line); offset = 0; while (is >> val) { h_pointVec[curPoint + vecSize * offset] = val; is >> eater; offset++; } curPoint++; } infile.close(); cout << "Fetched data successfully from " << filename << endl; float* d_pointVec; int* h_done = new int(0); hipMalloc(&d_pointVec, dimensions * vecSize * sizeof(float)); hipMemcpy(d_pointVec, h_pointVec.data(), dimensions * vecSize * sizeof(float), hipMemcpyHostToDevice); // each dimension has k size vector<float> h_means(k * dimensions); int check; // Initialize clusters for (int i = 0; i < k; i++) { while (true) { int idx = rand() % vecSize; check = 0; for (int j = 0; j < dimensions; j++) { if (find(h_means.begin() + k * j, h_means.begin() + k * (j + 1), h_pointVec[idx + vecSize * j]) == h_means.begin() + k * (j + 1)) { check++; } h_means[i + j * k] = h_pointVec[idx + vecSize * j]; } if (check > 0) { break; } } } cout << k << " clusters initialized" << endl; cout << "Running K-means clustering" << endl; int* d_cur_cluster; float* d_means; float* d_sums; int* d_counter; int* d_done; hipMalloc(&d_cur_cluster, vecSize * sizeof(int)); hipMalloc(&d_means, k * dimensions *sizeof(float)); hipMemcpy(d_means, h_means.data(), k * dimensions * sizeof(float), hipMemcpyHostToDevice); hipMalloc(&d_sums, k * dimensions * sizeof(float)); hipMalloc(&d_counter, k * sizeof(int)); hipMalloc(&d_done, sizeof(int)); int blockSize = 1024; int gridSize = (vecSize - 1) 
/ blockSize + 1; int sharedSize = dimensions * k * sizeof(float); auto start = chrono::high_resolution_clock::now(); int iter; for (iter = 0; iter < iters; iter++) { // Clear sum and counter array to prepare for new iteration hipMemset(d_sums, 0, k * dimensions * sizeof(int)); hipMemset(d_counter, 0, k * sizeof(int)); hipMemset(d_done, 0, sizeof(int)); // For each point, find nearest cluster and add itself to the cluster's sum setClusters << <gridSize, blockSize, sharedSize >> > (d_pointVec, d_cur_cluster, d_means, d_sums, vecSize, k, dimensions, d_counter, d_done); // Get new mean of each cluster getNewMeans << <1, k >> > (d_means, d_sums, d_counter, k, dimensions); // Check if done became false(1), if so continue hipMemcpy(h_done, d_done, sizeof(int), hipMemcpyDeviceToHost); if (h_done[0] == 0) break; } auto end = chrono::high_resolution_clock::now(); cout << "Clustering completed in iteration : " << iter << endl << endl; cout << "Time taken: " << chrono::duration_cast<chrono::microseconds>(end - start).count() << " microseconds" << endl; hipMemcpy(h_means.data(), d_means, k * dimensions * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < k; i++) { cout << "Centroid in cluster " << i << " : "; for (int j = 0; j < dimensions; j++) { cout << h_means[i + k * j] << " "; } cout << endl; } }
ee78b4a0b81e665ad5eb77e99e7215606a489682.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <fstream> #include <iostream> #include <sstream> #include <vector> #include <cmath> #include <algorithm> #include <chrono> using namespace std; __global__ void setClusters(float* pointVec, int* cur_cluster, float* means, float* sums, int vecSize, int k, int dimensions, int* counter, int* done) { extern __shared__ float shared_means[]; // idx corresponds to point id int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= vecSize) return; if (threadIdx.x < k * dimensions) { // Put means into shared memory to reduce time to take from global memory shared_means[threadIdx.x] = means[threadIdx.x]; } __syncthreads(); float minDist = FLT_MAX; int bestCluster = INT_MAX; float distance; // Find best cluster for each point for (int i = 0; i < k; i++) { distance = 0; for (int j = 0; j < dimensions; j++) { distance += (pointVec[idx + vecSize * j] - shared_means[i + k * j]) * (pointVec[idx + vecSize * j] - shared_means[i + k * j]); } if (distance < minDist) { minDist = distance; bestCluster = i; } } // If cluster changed, update cluster id list and set done to false (1) if (cur_cluster[idx] != bestCluster) { cur_cluster[idx] = bestCluster; done[0] = 1; } // Atomically add sums and counter with cluster for (int i = 0; i < dimensions; i++) { atomicAdd(&sums[bestCluster + k * i], pointVec[idx + vecSize * i]); } atomicAdd(&counter[bestCluster], 1); } __global__ void getNewMeans(float* means, float* sums, int* counter, int k, int dimensions) { // Get new mean for each cluster int idx = blockIdx.x * blockDim.x + threadIdx.x; int count = (counter[idx] != 0) ? 
counter[idx] : 1; for (int i = 0; i < dimensions; i++) { means[idx + k * i] = sums[idx + k * i] / count; } } int main(int argc, char* argv[]) { cout << "---CUDA Atomic---" << endl; int k = 5; int iters = 300; int dimensions = 3; int vecSize = 100000; string filename = "test3d100000.csv"; ifstream infile(filename.c_str()); string line; if (!infile.is_open()) { cout << "Error: Failed to open file" << endl; return 1; } vector<float> h_pointVec(dimensions * vecSize); float val; char eater; int curPoint = 0; int offset; // Add point to vector while (getline(infile, line)) { stringstream is(line); offset = 0; while (is >> val) { h_pointVec[curPoint + vecSize * offset] = val; is >> eater; offset++; } curPoint++; } infile.close(); cout << "Fetched data successfully from " << filename << endl; float* d_pointVec; int* h_done = new int(0); cudaMalloc(&d_pointVec, dimensions * vecSize * sizeof(float)); cudaMemcpy(d_pointVec, h_pointVec.data(), dimensions * vecSize * sizeof(float), cudaMemcpyHostToDevice); // each dimension has k size vector<float> h_means(k * dimensions); int check; // Initialize clusters for (int i = 0; i < k; i++) { while (true) { int idx = rand() % vecSize; check = 0; for (int j = 0; j < dimensions; j++) { if (find(h_means.begin() + k * j, h_means.begin() + k * (j + 1), h_pointVec[idx + vecSize * j]) == h_means.begin() + k * (j + 1)) { check++; } h_means[i + j * k] = h_pointVec[idx + vecSize * j]; } if (check > 0) { break; } } } cout << k << " clusters initialized" << endl; cout << "Running K-means clustering" << endl; int* d_cur_cluster; float* d_means; float* d_sums; int* d_counter; int* d_done; cudaMalloc(&d_cur_cluster, vecSize * sizeof(int)); cudaMalloc(&d_means, k * dimensions *sizeof(float)); cudaMemcpy(d_means, h_means.data(), k * dimensions * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&d_sums, k * dimensions * sizeof(float)); cudaMalloc(&d_counter, k * sizeof(int)); cudaMalloc(&d_done, sizeof(int)); int blockSize = 1024; int gridSize = 
(vecSize - 1) / blockSize + 1; int sharedSize = dimensions * k * sizeof(float); auto start = chrono::high_resolution_clock::now(); int iter; for (iter = 0; iter < iters; iter++) { // Clear sum and counter array to prepare for new iteration cudaMemset(d_sums, 0, k * dimensions * sizeof(int)); cudaMemset(d_counter, 0, k * sizeof(int)); cudaMemset(d_done, 0, sizeof(int)); // For each point, find nearest cluster and add itself to the cluster's sum setClusters << <gridSize, blockSize, sharedSize >> > (d_pointVec, d_cur_cluster, d_means, d_sums, vecSize, k, dimensions, d_counter, d_done); // Get new mean of each cluster getNewMeans << <1, k >> > (d_means, d_sums, d_counter, k, dimensions); // Check if done became false(1), if so continue cudaMemcpy(h_done, d_done, sizeof(int), cudaMemcpyDeviceToHost); if (h_done[0] == 0) break; } auto end = chrono::high_resolution_clock::now(); cout << "Clustering completed in iteration : " << iter << endl << endl; cout << "Time taken: " << chrono::duration_cast<chrono::microseconds>(end - start).count() << " microseconds" << endl; cudaMemcpy(h_means.data(), d_means, k * dimensions * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < k; i++) { cout << "Centroid in cluster " << i << " : "; for (int j = 0; j < dimensions; j++) { cout << h_means[i + k * j] << " "; } cout << endl; } }
20e78a79605056c6d1e78fab401674b63df9fbe0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/ell_kernels.hpp" #include <array> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/types.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/format_conversion.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/zero_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The ELL matrix format namespace. * * @ingroup ell */ namespace ell { constexpr int default_block_size = 512; // TODO: num_threads_per_core and ratio are parameters should be tuned /** * num_threads_per_core is the oversubscribing parameter. There are * `num_threads_per_core` threads assigned to each physical core. */ constexpr int num_threads_per_core = 4; /** * ratio is the parameter to decide when to use threads to do reduction on each * row. (#cols/#rows > ratio) */ constexpr double ratio = 1e-2; /** * A compile-time list of sub-warp sizes for which the spmv kernels should be * compiled. * 0 is a special case where it uses a sub-warp size of warp_size in * combination with atomic_adds. 
*/ using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32, cuda_config::warp_size>; namespace kernel { namespace { template <int subwarp_size, bool atomic, typename ValueType, typename IndexType, typename Closure> __device__ void spmv_kernel(const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride, Closure op) { const auto tidx = static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x; const IndexType x = tidx / subwarp_size / nwarps_per_row; const auto warp_id = tidx / subwarp_size % nwarps_per_row; const auto y_start = tidx % subwarp_size + num_stored_elements_per_row * warp_id / nwarps_per_row; const auto y_end = num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row; if (x < num_rows) { const auto tile_block = group::tiled_partition<subwarp_size>(group::this_thread_block()); ValueType temp = zero<ValueType>(); const auto column_id = blockIdx.y; for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) { const auto ind = x + idx * stride; const auto col_idx = col[ind]; if (col_idx < idx) { break; } else { temp += val[ind] * b[col_idx * b_stride + column_id]; } } const auto answer = reduce( tile_block, temp, [](ValueType x, ValueType y) { return x + y; }); if (tile_block.thread_rank() == 0) { if (atomic) { atomic_add(&(c[x * c_stride + column_id]), op(answer, c[x * c_stride + column_id])); } else { c[x * c_stride + column_id] = op(answer, c[x * c_stride + column_id]); } } } } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type 
num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride) { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [](const ValueType &x, const ValueType &y) { return x; }); } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ alpha, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, const ValueType *__restrict__ beta, ValueType *__restrict__ c, const size_type c_stride) { const ValueType alpha_val = alpha[0]; const ValueType beta_val = beta[0]; // Because the atomic operation changes the values of c during computation, // it can not do the right alpha * a * b + beta * c operation. // Thus, the cuda kernel only computes alpha * a * b when it uses atomic // operation. 
if (atomic) { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val](const ValueType &x, const ValueType &y) { return alpha_val * x; }); } else { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val, &beta_val](const ValueType &x, const ValueType &y) { return alpha_val * x + beta_val * y; }); } } } // namespace } // namespace kernel namespace { template <int info, typename ValueType, typename IndexType> void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c, const matrix::Dense<ValueType> *alpha = nullptr, const matrix::Dense<ValueType> *beta = nullptr) { const auto nrows = a->get_size()[0]; constexpr int subwarp_size = (info == 0) ? cuda_config::warp_size : info; constexpr bool atomic = (info == 0); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size( ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x), b->get_size()[1], 1); if (alpha == nullptr && beta == nullptr) { hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0, nrows, nwarps_per_row, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(c->get_values()), c->get_stride()); } else if (alpha != nullptr && beta != nullptr) { hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0, nrows, nwarps_per_row, as_cuda_type(alpha->get_const_values()), as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(beta->get_const_values()), 
as_cuda_type(c->get_values()), c->get_stride()); } else { GKO_KERNEL_NOT_FOUND; } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv); template <typename ValueType, typename IndexType> std::array<int, 3> compute_subwarp_size_and_atomicity( std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a) { int subwarp_size = 1; int atomic = 0; int nwarps_per_row = 1; const auto nrows = a->get_size()[0]; const auto ell_ncols = a->get_num_stored_elements_per_row(); const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size * exec->get_num_multiprocessor() * num_threads_per_core; // Use multithreads to perform the reduction on each row when the matrix is // wide. // To make every thread have computation, so pick the value which is the // power of 2 less than warp_size and is less than or equal to ell_ncols. If // the subwarp_size is warp_size and allow more than one warps to work on // the same row, use atomic add to handle the warps write the value into the // same position. The #warps is decided according to the number of warps // allowed on GPU. 
if (static_cast<double>(ell_ncols) / nrows > ratio) { while (subwarp_size < cuda_config::warp_size && (subwarp_size << 1) <= ell_ncols) { subwarp_size <<= 1; } if (subwarp_size == cuda_config::warp_size) { nwarps_per_row = ::min(ell_ncols / cuda_config::warp_size, nwarps / nrows); nwarps_per_row = ::max(nwarps_per_row, 1); } if (nwarps_per_row > 1) { atomic = 1; } } return {subwarp_size, atomic, nwarps_per_row}; } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. * for info == 0, it uses the kernel by warp_size threads with atomic * operation for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { zero_array(c->get_num_stored_elements(), c->get_values()); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. 
* for info == 0, it uses the kernel by warp_size threads with atomic * operation for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { dense::scale(exec, beta, c); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c, alpha, beta); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense( size_type num_rows, size_type num_cols, size_type stride, ValueType *__restrict__ result) { const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x; const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y; if (tidx_x < num_cols && tidx_y < num_rows) { result[tidx_y * stride + tidx_x] = zero<ValueType>(); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_dense( size_type num_rows, size_type nnz, size_type source_stride, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, size_type result_stride, ValueType *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < num_rows) { for (auto col = 0; col < nnz; col++) { result[tidx * result_stride + col_idxs[tidx + col * source_stride]] += values[tidx + col * source_stride]; } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_dense(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *result, const matrix::Ell<ValueType, IndexType> *source) { const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; const auto result_stride = result->get_stride(); const auto col_idxs = source->get_const_col_idxs(); const auto vals = source->get_const_values(); const auto source_stride = 
source->get_stride(); const dim3 block_size(cuda_config::warp_size, cuda_config::max_block_size / cuda_config::warp_size, 1); const dim3 init_grid_dim(ceildiv(result_stride, block_size.x), ceildiv(num_rows, block_size.y), 1); hipLaunchKernelGGL(( kernel::initialize_zero_dense), dim3(init_grid_dim), dim3(block_size), 0, 0, num_rows, num_cols, result_stride, as_cuda_type(result->get_values())); const auto grid_dim = ceildiv(num_rows, default_block_size); hipLaunchKernelGGL(( kernel::fill_in_dense), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, source->get_num_stored_elements_per_row(), source_stride, as_cuda_type(col_idxs), as_cuda_type(vals), result_stride, as_cuda_type(result->get_values())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ values, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row; i += warp_size) { if (values[stride * i + row_idx] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const size_type &a, const size_type &b) { return a + b; }); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ source_values, const IndexType *__restrict__ source_col_idxs, IndexType *__restrict__ result_row_ptrs, IndexType *__restrict__ result_col_idxs, 
ValueType *__restrict__ result_values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = result_row_ptrs[tidx]; for (auto i = 0; i < max_nnz_per_row; i++) { const auto source_idx = tidx + stride * i; if (source_values[source_idx] != zero<ValueType>()) { result_values[write_to] = source_values[source_idx]; result_col_idxs[write_to] = source_col_idxs[source_idx]; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Ell<ValueType, IndexType> *source) { auto num_rows = result->get_size()[0]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); const auto stride = source->get_stride(); const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); constexpr auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim_nnz), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); hipLaunchKernelGGL(( start_prefix_sum<default_block_size>) , dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(source->get_const_col_idxs()), 
as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL); template <typename ValueType, typename IndexType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); *result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data()); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL); template <typename ValueType, typename IndexType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, Array<size_type> *result) { const auto num_rows = source->get_size()[0]; const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); const auto stride = source->get_stride(); const auto values = source->get_const_values(); const auto warp_size = cuda_config::warp_size; const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(values), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL); } // namespace ell } // namespace cuda } // namespace kernels } // namespace gko
20e78a79605056c6d1e78fab401674b63df9fbe0.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/ell_kernels.hpp" #include <array> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/types.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/format_conversion.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/zero_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The ELL matrix format namespace. * * @ingroup ell */ namespace ell { constexpr int default_block_size = 512; // TODO: num_threads_per_core and ratio are parameters should be tuned /** * num_threads_per_core is the oversubscribing parameter. There are * `num_threads_per_core` threads assigned to each physical core. */ constexpr int num_threads_per_core = 4; /** * ratio is the parameter to decide when to use threads to do reduction on each * row. (#cols/#rows > ratio) */ constexpr double ratio = 1e-2; /** * A compile-time list of sub-warp sizes for which the spmv kernels should be * compiled. * 0 is a special case where it uses a sub-warp size of warp_size in * combination with atomic_adds. 
*/ using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32, cuda_config::warp_size>; namespace kernel { namespace { template <int subwarp_size, bool atomic, typename ValueType, typename IndexType, typename Closure> __device__ void spmv_kernel(const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride, Closure op) { const auto tidx = static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x; const IndexType x = tidx / subwarp_size / nwarps_per_row; const auto warp_id = tidx / subwarp_size % nwarps_per_row; const auto y_start = tidx % subwarp_size + num_stored_elements_per_row * warp_id / nwarps_per_row; const auto y_end = num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row; if (x < num_rows) { const auto tile_block = group::tiled_partition<subwarp_size>(group::this_thread_block()); ValueType temp = zero<ValueType>(); const auto column_id = blockIdx.y; for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) { const auto ind = x + idx * stride; const auto col_idx = col[ind]; if (col_idx < idx) { break; } else { temp += val[ind] * b[col_idx * b_stride + column_id]; } } const auto answer = reduce( tile_block, temp, [](ValueType x, ValueType y) { return x + y; }); if (tile_block.thread_rank() == 0) { if (atomic) { atomic_add(&(c[x * c_stride + column_id]), op(answer, c[x * c_stride + column_id])); } else { c[x * c_stride + column_id] = op(answer, c[x * c_stride + column_id]); } } } } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type 
num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride) { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [](const ValueType &x, const ValueType &y) { return x; }); } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const int nwarps_per_row, const ValueType *__restrict__ alpha, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, const ValueType *__restrict__ beta, ValueType *__restrict__ c, const size_type c_stride) { const ValueType alpha_val = alpha[0]; const ValueType beta_val = beta[0]; // Because the atomic operation changes the values of c during computation, // it can not do the right alpha * a * b + beta * c operation. // Thus, the cuda kernel only computes alpha * a * b when it uses atomic // operation. 
if (atomic) { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val](const ValueType &x, const ValueType &y) { return alpha_val * x; }); } else { spmv_kernel<subwarp_size, atomic>( num_rows, nwarps_per_row, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val, &beta_val](const ValueType &x, const ValueType &y) { return alpha_val * x + beta_val * y; }); } } } // namespace } // namespace kernel namespace { template <int info, typename ValueType, typename IndexType> void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c, const matrix::Dense<ValueType> *alpha = nullptr, const matrix::Dense<ValueType> *beta = nullptr) { const auto nrows = a->get_size()[0]; constexpr int subwarp_size = (info == 0) ? cuda_config::warp_size : info; constexpr bool atomic = (info == 0); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size( ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x), b->get_size()[1], 1); if (alpha == nullptr && beta == nullptr) { kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>( nrows, nwarps_per_row, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(c->get_values()), c->get_stride()); } else if (alpha != nullptr && beta != nullptr) { kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>( nrows, nwarps_per_row, as_cuda_type(alpha->get_const_values()), as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(beta->get_const_values()), as_cuda_type(c->get_values()), c->get_stride()); } else { GKO_KERNEL_NOT_FOUND; } 
} GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv); template <typename ValueType, typename IndexType> std::array<int, 3> compute_subwarp_size_and_atomicity( std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a) { int subwarp_size = 1; int atomic = 0; int nwarps_per_row = 1; const auto nrows = a->get_size()[0]; const auto ell_ncols = a->get_num_stored_elements_per_row(); const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size * exec->get_num_multiprocessor() * num_threads_per_core; // Use multithreads to perform the reduction on each row when the matrix is // wide. // To make every thread have computation, so pick the value which is the // power of 2 less than warp_size and is less than or equal to ell_ncols. If // the subwarp_size is warp_size and allow more than one warps to work on // the same row, use atomic add to handle the warps write the value into the // same position. The #warps is decided according to the number of warps // allowed on GPU. if (static_cast<double>(ell_ncols) / nrows > ratio) { while (subwarp_size < cuda_config::warp_size && (subwarp_size << 1) <= ell_ncols) { subwarp_size <<= 1; } if (subwarp_size == cuda_config::warp_size) { nwarps_per_row = std::min(ell_ncols / cuda_config::warp_size, nwarps / nrows); nwarps_per_row = std::max(nwarps_per_row, 1); } if (nwarps_per_row > 1) { atomic = 1; } } return {subwarp_size, atomic, nwarps_per_row}; } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. 
* for info == 0, it uses the kernel by warp_size threads with atomic * operation for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { zero_array(c->get_num_stored_elements(), c->get_values()); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. 
* for info == 0, it uses the kernel by warp_size threads with atomic * operation for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { dense::scale(exec, beta, c); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c, alpha, beta); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense( size_type num_rows, size_type num_cols, size_type stride, ValueType *__restrict__ result) { const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x; const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y; if (tidx_x < num_cols && tidx_y < num_rows) { result[tidx_y * stride + tidx_x] = zero<ValueType>(); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_dense( size_type num_rows, size_type nnz, size_type source_stride, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, size_type result_stride, ValueType *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < num_rows) { for (auto col = 0; col < nnz; col++) { result[tidx * result_stride + col_idxs[tidx + col * source_stride]] += values[tidx + col * source_stride]; } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_dense(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *result, const matrix::Ell<ValueType, IndexType> *source) { const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; const auto result_stride = result->get_stride(); const auto col_idxs = source->get_const_col_idxs(); const auto vals = source->get_const_values(); const auto source_stride = 
source->get_stride(); const dim3 block_size(cuda_config::warp_size, cuda_config::max_block_size / cuda_config::warp_size, 1); const dim3 init_grid_dim(ceildiv(result_stride, block_size.x), ceildiv(num_rows, block_size.y), 1); kernel::initialize_zero_dense<<<init_grid_dim, block_size>>>( num_rows, num_cols, result_stride, as_cuda_type(result->get_values())); const auto grid_dim = ceildiv(num_rows, default_block_size); kernel::fill_in_dense<<<grid_dim, default_block_size>>>( num_rows, source->get_num_stored_elements_per_row(), source_stride, as_cuda_type(col_idxs), as_cuda_type(vals), result_stride, as_cuda_type(result->get_values())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ values, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row; i += warp_size) { if (values[stride * i + row_idx] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const size_type &a, const size_type &b) { return a + b; }); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ source_values, const IndexType *__restrict__ source_col_idxs, IndexType *__restrict__ result_row_ptrs, IndexType *__restrict__ result_col_idxs, ValueType *__restrict__ result_values) { const auto tidx = threadIdx.x + 
blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = result_row_ptrs[tidx]; for (auto i = 0; i < max_nnz_per_row; i++) { const auto source_idx = tidx + stride * i; if (source_values[source_idx] != zero<ValueType>()) { result_values[write_to] = source_values[source_idx]; result_col_idxs[write_to] = source_col_idxs[source_idx]; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Ell<ValueType, IndexType> *source) { auto num_rows = result->get_size()[0]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); const auto stride = source->get_stride(); const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); constexpr auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); start_prefix_sum<default_block_size> <<<grid_dim, default_block_size>>>(num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); kernel::fill_in_csr<<<grid_dim, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL); template <typename ValueType, typename 
IndexType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); *result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data()); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL); template <typename ValueType, typename IndexType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, Array<size_type> *result) { const auto num_rows = source->get_size()[0]; const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); const auto stride = source->get_stride(); const auto values = source->get_const_values(); const auto warp_size = cuda_config::warp_size; const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size); kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(values), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL); } // namespace ell } // namespace cuda } // namespace kernels } // namespace gko
7b3fbeccd34bc1e2c7d087350894509435e92dd9.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathCompareT.cuh" #include "THHTensor.hpp" #include "THHStream.h" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateFloatType.h"
7b3fbeccd34bc1e2c7d087350894509435e92dd9.cu
#include "../THCTensorMathCompareT.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateFloatType.h"
477ad75aa8ba873f3ad3392312ce753bc6575972.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "cudaUtility.h" // gpuPreImageNet __global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z, px.y, px.x); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNet hipError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, hipStream_t stream ) { if( !input || !output ) return hipErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return hipErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); hipLaunchKernelGGL(( gpuPreImageNet), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight); return CUDA(hipGetLastError()); } // gpuPreImageNetMean __global__ void gpuPreImageNetMean( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z); output[n * 0 + y * oWidth + x] = 
bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNetMean hipError_t cudaPreImageNetMean( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value, hipStream_t stream ) { if( !input || !output ) return hipErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return hipErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); hipLaunchKernelGGL(( gpuPreImageNetMean), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, mean_value); return CUDA(hipGetLastError()); }
477ad75aa8ba873f3ad3392312ce753bc6575972.cu
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "cudaUtility.h" // gpuPreImageNet __global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z, px.y, px.x); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNet cudaError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, cudaStream_t stream ) { if( !input || !output ) return cudaErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return cudaErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); gpuPreImageNet<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight); return CUDA(cudaGetLastError()); } // gpuPreImageNetMean __global__ void gpuPreImageNetMean( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * 
oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNetMean cudaError_t cudaPreImageNetMean( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value, cudaStream_t stream ) { if( !input || !output ) return cudaErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return cudaErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); gpuPreImageNetMean<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, mean_value); return CUDA(cudaGetLastError()); }
8af4b5e6e7cbda37162140ad149d94987bb0363b.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <hip/hip_runtime_api.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> //#include <hip/hip_runtime.h> //#include <helper_cuda.h> // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ for (int i =0;i<nx;i++){ for(int j=0;j<ny;j++){ C[i*ny+j] = A[i*ny+j]+B[i*ny+j]; } } return; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = ix*ny + iy ; //iy*ny + ix previously with <= instead of = if( (ix<nx) && (iy<ny) ) C[idx] = A[idx] + B[idx] ; } void initData(float *M, long x, long y, int flag ){ //remove and put it in main assigining values in a single lool if(flag) { printf("A\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)(i+j)/3.0; //printf("%f ",M[i*y+j]); } //printf("\n"); } } else { printf("B\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)3.14*(i+j) ; //printf("%f ",M[i*y+j]); } //printf("\n"); } } } int main( int argc, char *argv[] ) { // get program arguments if (argc!=3){ printf("Fail"); exit(1); //printf("Fail"); } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity int noElems = nx*ny ; int bytes = noElems * sizeof(float) ; // but you may want to pad the matrices // alloc memory host-side float *h_A = (float *) malloc( bytes ) ; float *h_B = (float *) malloc( bytes ) ; float *h_hC = (float *) malloc( bytes ) ; // host result float 
*h_dC = (float *) malloc( bytes ) ; // gpu result // init matrices with random data initData(h_A,nx,ny,1); initData(h_B,nx,ny,0); // alloc memory dev-side float *d_A, *d_B, *d_C ; hipMalloc( (void **) &d_A, bytes ) ; hipMalloc( (void **) &d_B, bytes ) ; hipMalloc( (void **) &d_C, bytes ) ; double timeStampA = getTimeStamp() ; //transfer data to dev hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice ) ; hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice ) ; // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; // invoke Kernel dim3 block( 16, 16) ; // you will want to configure this dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y) ; printf("%d\n",(ny+block.y-1)/block.y); hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny ) ; hipDeviceSynchronize() ; double timeStampC = getTimeStamp() ; //copy data back hipMemcpyAsync(h_dC, d_C, bytes, hipMemcpyDeviceToHost); //learn how to comment and uncomment in one go /* printf("C\n"); for(int i=0;i<nx;i++){ for(int j=0;j<ny;j++){ //printf("%f ",h_dC[i*ny+j]); } //printf("\n"); } */ double timeStampD = getTimeStamp() ; //for(int i=0; i<nx; i++){ // for(int j=0; j<ny; j++){ // printf("%f ",h_dC[i*ny+j]); // } // printf("\n"); //} // free GPU resources hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ; hipDeviceReset() ; // check result printf("%f %f %f %f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC)); h_addmat( h_A, h_B, h_hC, nx, ny ) ; int flag = 0; for(int i=0;i<nx;i++){ for(int j=0;j<ny;j++){ if(h_hC[i*ny+j] != h_dC[i*ny+j]) flag=1; } } printf("\n %d \n",flag); // print out results }
8af4b5e6e7cbda37162140ad149d94987bb0363b.cu
#include <sys/time.h> #include <cuda.h> #include <stdio.h> #include <cuda_runtime_api.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> //#include <cuda.h> //#include <helper_cuda.h> // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ for (int i =0;i<nx;i++){ for(int j=0;j<ny;j++){ C[i*ny+j] = A[i*ny+j]+B[i*ny+j]; } } return; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = ix*ny + iy ; //iy*ny + ix previously with <= instead of = if( (ix<nx) && (iy<ny) ) C[idx] = A[idx] + B[idx] ; } void initData(float *M, long x, long y, int flag ){ //remove and put it in main assigining values in a single lool if(flag) { printf("A\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)(i+j)/3.0; //printf("%f ",M[i*y+j]); } //printf("\n"); } } else { printf("B\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)3.14*(i+j) ; //printf("%f ",M[i*y+j]); } //printf("\n"); } } } int main( int argc, char *argv[] ) { // get program arguments if (argc!=3){ printf("Fail"); exit(1); //printf("Fail"); } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity int noElems = nx*ny ; int bytes = noElems * sizeof(float) ; // but you may want to pad the matrices… // alloc memory host-side float *h_A = (float *) malloc( bytes ) ; float *h_B = (float *) malloc( bytes ) ; float *h_hC = (float *) malloc( bytes ) ; // host result float *h_dC = (float *) malloc( bytes ) ; // gpu result // init matrices with random data 
initData(h_A,nx,ny,1); initData(h_B,nx,ny,0); // alloc memory dev-side float *d_A, *d_B, *d_C ; cudaMalloc( (void **) &d_A, bytes ) ; cudaMalloc( (void **) &d_B, bytes ) ; cudaMalloc( (void **) &d_C, bytes ) ; double timeStampA = getTimeStamp() ; //transfer data to dev cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice ) ; cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice ) ; // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; // invoke Kernel dim3 block( 16, 16) ; // you will want to configure this dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y) ; printf("%d\n",(ny+block.y-1)/block.y); f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ; cudaDeviceSynchronize() ; double timeStampC = getTimeStamp() ; //copy data back cudaMemcpyAsync(h_dC, d_C, bytes, cudaMemcpyDeviceToHost); //learn how to comment and uncomment in one go /* printf("C\n"); for(int i=0;i<nx;i++){ for(int j=0;j<ny;j++){ //printf("%f ",h_dC[i*ny+j]); } //printf("\n"); } */ double timeStampD = getTimeStamp() ; //for(int i=0; i<nx; i++){ // for(int j=0; j<ny; j++){ // printf("%f ",h_dC[i*ny+j]); // } // printf("\n"); //} // free GPU resources cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ; cudaDeviceReset() ; // check result printf("%f %f %f %f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC)); h_addmat( h_A, h_B, h_hC, nx, ny ) ; int flag = 0; for(int i=0;i<nx;i++){ for(int j=0;j<ny;j++){ if(h_hC[i*ny+j] != h_dC[i*ny+j]) flag=1; } } printf("\n %d \n",flag); // print out results }
7f6eee057bd4280186cc646d17a0e09d5e24d9e5.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2019-2023 by XGBoost Contributors */ #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <xgboost/c_api.h> #include <xgboost/data.h> #include <algorithm> #include <cmath> #include "../../../include/xgboost/logging.h" #include "../../../src/common/device_helpers.cuh" #include "../../../src/common/hist_util.cuh" #include "../../../src/common/hist_util.h" #include "../../../src/common/math.h" #include "../../../src/data/device_adapter.cuh" #include "../../../src/data/simple_dmatrix.h" #include "../data/test_array_interface.h" #include "../filesystem.h" // dmlc::TemporaryDirectory #include "../helpers.h" #include "test_hist_util.h" namespace xgboost { namespace common { template <typename AdapterT> HistogramCuts GetHostCuts(Context const* ctx, AdapterT* adapter, int num_bins, float missing) { data::SimpleDMatrix dmat(adapter, missing, 1); HistogramCuts cuts = SketchOnDMatrix(ctx, &dmat, num_bins); return cuts; } TEST(HistUtil, DeviceSketch) { int num_columns = 1; int num_bins = 4; std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f}; int num_rows = x.size(); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); Context ctx; HistogramCuts host_cuts = SketchOnDMatrix(&ctx, dmat.get(), num_bins); EXPECT_EQ(device_cuts.Values(), host_cuts.Values()); EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs()); EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues()); } TEST(HistUtil, SketchBatchNumElements) { #if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 LOG(WARNING) << "Test not runnable with RMM enabled."; return; #endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 size_t constexpr kCols = 10000; int device; dh::safe_cuda(hipGetDevice(&device)); auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8); auto per_elem = detail::BytesPerElement(false); auto avail_elem = avail / per_elem; size_t 
rows = avail_elem / kCols * 10; auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false); ASSERT_EQ(batch, avail_elem); } TEST(HistUtil, DeviceSketchMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); ConsoleLogger::Configure({{"verbosity", "0"}}); } TEST(HistUtil, DeviceSketchWeightsMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, true); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required); } TEST(HistUtil, DeviceSketchDeterminism) { int num_rows = 500; int num_columns = 5; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins); size_t constexpr kRounds{ 100 }; for (size_t r = 0; r < kRounds; ++r) { auto new_sketch = DeviceSketch(0, dmat.get(), num_bins); ASSERT_EQ(reference_sketch.Values(), new_sketch.Values()); 
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues()); } } TEST(HistUtil, DeviceSketchCategoricalAsNumeric) { int categorical_sizes[] = {2, 6, 8, 12}; int num_bins = 256; int sizes[] = {25, 100, 1000}; for (auto n : sizes) { for (auto num_categories : categorical_sizes) { auto x = GenerateRandomCategoricalSingleColumn(n, num_categories); auto dmat = GetDMatrixFromData(x, n, 1); auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } TEST(HistUtil, DeviceSketchCategoricalFeatures) { TestCategoricalSketch(1000, 256, 32, false, [](DMatrix *p_fmat, int32_t num_bins) { return DeviceSketch(0, p_fmat, num_bins); }); TestCategoricalSketch(1000, 256, 32, true, [](DMatrix *p_fmat, int32_t num_bins) { return DeviceSketch(0, p_fmat, num_bins); }); } void TestMixedSketch() { size_t n_samples = 1000, n_features = 2, n_categories = 3; std::vector<float> data(n_samples * n_features); SimpleLCG gen; SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)}; SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f}; for (size_t i = 0; i < n_samples * n_features; ++i) { if (i % 2 == 0) { data[i] = ::floor(cat_d(&gen)); } else { data[i] = num_d(&gen); } } auto m = GetDMatrixFromData(data, n_samples, n_features); m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical); m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical); auto cuts = DeviceSketch(0, m.get(), 64); ASSERT_EQ(cuts.Values().size(), 64 + n_categories); } TEST(HistUtil, DeviceSketchMixedFeatures) { TestMixedSketch(); } TEST(HistUtil, DeviceSketchMultipleColumns) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } 
} TEST(HistUtil, DeviceSketchMultipleColumnsWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } TEST(HistUitl, DeviceSketchWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns); auto& h_weights = weighted_dmat->Info().weights_.HostVector(); h_weights.resize(num_rows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins); ASSERT_EQ(cuts.MinValues(), wcuts.MinValues()); ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs()); ASSERT_EQ(cuts.Values(), wcuts.Values()); ValidateCuts(cuts, dmat.get(), num_bins); ValidateCuts(wcuts, weighted_dmat.get(), num_bins); } } } TEST(HistUtil, DeviceSketchBatches) { int num_bins = 256; int num_rows = 5000; int batch_sizes[] = {0, 100, 1500, 6000}; int num_columns = 5; for (auto batch_size : batch_sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size); ValidateCuts(cuts, dmat.get(), num_bins); } num_rows = 1000; size_t batches = 16; auto x = GenerateRandom(num_rows * batches, num_columns); auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns); auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows); auto cuts = DeviceSketch(0, 
dmat.get(), num_bins, 0); auto const& cut_values_batched = cuts_with_batches.Values(); auto const& cut_values = cuts.Values(); CHECK_EQ(cut_values.size(), cut_values_batched.size()); for (size_t i = 0; i < cut_values.size(); ++i) { ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5); } } TEST(HistUtil, DeviceSketchMultipleColumnsExternal) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns =5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); dmlc::TemporaryDirectory temp; auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } // See https://github.com/dmlc/xgboost/issues/5866. TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; dmlc::TemporaryDirectory temp; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } template <typename Adapter> auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) { common::HistogramCuts batched_cuts; HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0); MetaInfo info; AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size); sketch_container.MakeCuts(&batched_cuts); return batched_cuts; } template <typename Adapter> void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) { common::HistogramCuts batched_cuts = 
MakeUnweightedCutsForTest( adapter, num_bins, std::numeric_limits<float>::quiet_NaN(), batch_size); ValidateCuts(batched_cuts, dmat, num_bins); } TEST(HistUtil, AdapterDeviceSketch) { int rows = 5; int cols = 1; int num_bins = 4; float missing = - 1.0; thrust::device_vector< float> data(rows*cols); auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data); data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 }; std::string str; Json::Dump(json_array_interface, &str); data::CupyAdapter adapter(str); auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing); Context ctx; auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing); EXPECT_EQ(device_cuts.Values(), host_cuts.Values()); EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs()); EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues()); } TEST(HistUtil, AdapterDeviceSketchMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN()); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); } TEST(HistUtil, AdapterSketchSlidingWindowMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); MetaInfo info; dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); common::HistogramCuts 
batched_cuts; HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); HistogramCuts cuts; sketch_container.MakeCuts(&cuts); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); ConsoleLogger::Configure({{"verbosity", "0"}}); } TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); MetaInfo info; auto& h_weights = info.weights_.HostVector(); h_weights.resize(num_rows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); common::HistogramCuts batched_cuts; HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); HistogramCuts cuts; sketch_container.MakeCuts(&cuts); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, true); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required); } void TestCategoricalSketchAdapter(size_t n, size_t num_categories, int32_t num_bins, bool weighted) { auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories); thrust::device_vector<float> x(h_x); auto adapter = AdapterFromData(x, n, 1); MetaInfo info; info.num_row_ 
= n; info.num_col_ = 1; info.feature_types.HostVector().push_back(FeatureType::kCategorical); if (weighted) { std::vector<float> weights(n, 0); SimpleLCG lcg; SimpleRealUniformDistribution<float> dist(0, 1); for (auto& v : weights) { v = dist(&lcg); } info.weights_.HostVector() = weights; } ASSERT_EQ(info.feature_types.Size(), 1); SketchContainer container(info.feature_types, num_bins, 1, n, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &container); HistogramCuts cuts; container.MakeCuts(&cuts); thrust::sort(x.begin(), x.end()); auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin(); ASSERT_NE(n_uniques, x.size()); ASSERT_EQ(cuts.TotalBins(), n_uniques); ASSERT_EQ(n_uniques, num_categories); auto& values = cuts.cut_values_.HostVector(); ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend())); auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques; ASSERT_TRUE(is_unique); x.resize(n_uniques); h_x.resize(n_uniques); thrust::copy(x.begin(), x.end(), h_x.begin()); for (decltype(n_uniques) i = 0; i < n_uniques; ++i) { ASSERT_EQ(h_x[i], values[i]); } } TEST(HistUtil, AdapterDeviceSketchCategorical) { int categorical_sizes[] = {2, 6, 8, 12}; int num_bins = 256; int sizes[] = {25, 100, 1000}; for (auto n : sizes) { for (auto num_categories : categorical_sizes) { auto x = GenerateRandomCategoricalSingleColumn(n, num_categories); auto dmat = GetDMatrixFromData(x, n, 1); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, n, 1); ValidateBatchedCuts(adapter, num_bins, dmat.get()); TestCategoricalSketchAdapter(n, num_categories, num_bins, true); TestCategoricalSketchAdapter(n, num_categories, num_bins, false); } } } TEST(HistUtil, AdapterDeviceSketchMultipleColumns) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto 
dmat = GetDMatrixFromData(x, num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); for (auto num_bins : bin_sizes) { auto adapter = AdapterFromData(x_device, num_rows, num_columns); ValidateBatchedCuts(adapter, num_bins, dmat.get()); } } } TEST(HistUtil, AdapterDeviceSketchBatches) { int num_bins = 256; int num_rows = 5000; int batch_sizes[] = {0, 100, 1500, 6000}; int num_columns = 5; for (auto batch_size : batch_sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size); } } namespace { auto MakeData(Context const* ctx, std::size_t n_samples, bst_feature_t n_features) { dh::safe_cuda(hipSetDevice(ctx->gpu_id)); auto n = n_samples * n_features; std::vector<float> x; x.resize(n); std::iota(x.begin(), x.end(), 0); std::int32_t c{0}; float missing = n_samples * n_features; for (std::size_t i = 0; i < x.size(); ++i) { if (i % 5 == 0) { x[i] = missing; c++; } } thrust::device_vector<float> d_x; d_x = x; auto n_invalids = n / 10 * 2 + 1; auto is_valid = data::IsValidFunctor{missing}; return std::tuple{x, d_x, n_invalids, is_valid}; } void TestGetColumnSize(std::size_t n_samples) { auto ctx = MakeCUDACtx(0); bst_feature_t n_features = 12; [[maybe_unused]] auto [x, d_x, n_invalids, is_valid] = MakeData(&ctx, n_samples, n_features); auto adapter = AdapterFromData(d_x, n_samples, n_features); auto batch = adapter.Value(); auto batch_iter = dh::MakeTransformIterator<data::COOTuple>( thrust::make_counting_iterator(0llu), [=] __device__(std::size_t idx) { return batch.GetElement(idx); }); dh::caching_device_vector<std::size_t> column_sizes_scan; column_sizes_scan.resize(n_features + 1); std::vector<std::size_t> h_column_size(column_sizes_scan.size()); std::vector<std::size_t> h_column_size_1(column_sizes_scan.size()); 
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, true>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size.begin()); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, false>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, true>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, false>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); } } // namespace TEST(HistUtil, GetColumnSize) { bst_row_t n_samples = 4096; TestGetColumnSize(n_samples); } // Check sketching from adapter or DMatrix results in the same answer // Consistency here is useful for testing and user experience TEST(HistUtil, SketchingEquivalent) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); for (auto num_bins : bin_sizes) { auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest( adapter, num_bins, std::numeric_limits<float>::quiet_NaN()); EXPECT_EQ(dmat_cuts.Values(), 
adapter_cuts.Values()); EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs()); EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues()); ValidateBatchedCuts(adapter, num_bins, dmat.get()); } } } TEST(HistUtil, DeviceSketchFromGroupWeights) { size_t constexpr kRows = 3000, kCols = 200, kBins = 256; size_t constexpr kGroups = 10; auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(); auto& h_weights = m->Info().weights_.HostVector(); h_weights.resize(kRows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); std::vector<bst_group_t> groups(kGroups); for (size_t i = 0; i < kGroups; ++i) { groups[i] = kRows / kGroups; } m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups); HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0); h_weights.clear(); HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0); ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size()); ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size()); ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size()); for (size_t i = 0; i < cuts.Values().size(); ++i) { EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i; } for (size_t i = 0; i < cuts.MinValues().size(); ++i) { ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]); } for (size_t i = 0; i < cuts.Ptrs().size(); ++i) { ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i)); } ValidateCuts(weighted_cuts, m.get(), kBins); } void TestAdapterSketchFromWeights(bool with_group) { size_t constexpr kRows = 300, kCols = 20, kBins = 256; size_t constexpr kGroups = 10; HostDeviceVector<float> storage; std::string m = RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface( &storage); MetaInfo info; Context ctx; auto& h_weights = info.weights_.HostVector(); if (with_group) { h_weights.resize(kGroups); } else { h_weights.resize(kRows); } std::fill(h_weights.begin(), h_weights.end(), 1.0f); std::vector<bst_group_t> groups(kGroups); if (with_group) { for (size_t i = 0; i < 
kGroups; ++i) { groups[i] = kRows / kGroups; } info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups); } info.weights_.SetDevice(0); info.num_row_ = kRows; info.num_col_ = kCols; data::CupyAdapter adapter(m); auto const& batch = adapter.Value(); HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, kBins, kCols, kRows, 0); AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); common::HistogramCuts cuts; sketch_container.MakeCuts(&cuts); auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols); if (with_group) { dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups); } dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size()); dmat->Info().num_col_ = kCols; dmat->Info().num_row_ = kRows; ASSERT_EQ(cuts.Ptrs().size(), kCols + 1); ValidateCuts(cuts, dmat.get(), kBins); if (with_group) { dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0); for (size_t i = 0; i < cuts.Values().size(); ++i) { ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]); } for (size_t i = 0; i < cuts.MinValues().size(); ++i) { ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]); } for (size_t i = 0; i < cuts.Ptrs().size(); ++i) { ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i)); } } if (with_group) { common::HistogramCuts weighted; auto& h_weights = info.weights_.HostVector(); h_weights.resize(kGroups); // Generate different weight. for (size_t i = 0; i < h_weights.size(); ++i) { // FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if // we use more diverse weights, partially caused by // https://github.com/dmlc/xgboost/issues/7946 h_weights[i] = (i % 2 == 0 ? 
1 : 2) / static_cast<float>(kGroups); } SketchContainer sketch_container(ft, kBins, kCols, kRows, 0); AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); sketch_container.MakeCuts(&weighted); ValidateCuts(weighted, dmat.get(), kBins); } } TEST(HistUtil, AdapterSketchFromWeights) { TestAdapterSketchFromWeights(false); TestAdapterSketchFromWeights(true); } } // namespace common } // namespace xgboost
7f6eee057bd4280186cc646d17a0e09d5e24d9e5.cu
/** * Copyright 2019-2023 by XGBoost Contributors */ #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <xgboost/c_api.h> #include <xgboost/data.h> #include <algorithm> #include <cmath> #include "../../../include/xgboost/logging.h" #include "../../../src/common/device_helpers.cuh" #include "../../../src/common/hist_util.cuh" #include "../../../src/common/hist_util.h" #include "../../../src/common/math.h" #include "../../../src/data/device_adapter.cuh" #include "../../../src/data/simple_dmatrix.h" #include "../data/test_array_interface.h" #include "../filesystem.h" // dmlc::TemporaryDirectory #include "../helpers.h" #include "test_hist_util.h" namespace xgboost { namespace common { template <typename AdapterT> HistogramCuts GetHostCuts(Context const* ctx, AdapterT* adapter, int num_bins, float missing) { data::SimpleDMatrix dmat(adapter, missing, 1); HistogramCuts cuts = SketchOnDMatrix(ctx, &dmat, num_bins); return cuts; } TEST(HistUtil, DeviceSketch) { int num_columns = 1; int num_bins = 4; std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f}; int num_rows = x.size(); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); Context ctx; HistogramCuts host_cuts = SketchOnDMatrix(&ctx, dmat.get(), num_bins); EXPECT_EQ(device_cuts.Values(), host_cuts.Values()); EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs()); EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues()); } TEST(HistUtil, SketchBatchNumElements) { #if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 LOG(WARNING) << "Test not runnable with RMM enabled."; return; #endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 size_t constexpr kCols = 10000; int device; dh::safe_cuda(cudaGetDevice(&device)); auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8); auto per_elem = detail::BytesPerElement(false); auto avail_elem = avail / per_elem; size_t rows = avail_elem / kCols * 10; auto batch = 
detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false); ASSERT_EQ(batch, avail_elem); } TEST(HistUtil, DeviceSketchMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); ConsoleLogger::Configure({{"verbosity", "0"}}); } TEST(HistUtil, DeviceSketchWeightsMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto device_cuts = DeviceSketch(0, dmat.get(), num_bins); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, true); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required); } TEST(HistUtil, DeviceSketchDeterminism) { int num_rows = 500; int num_columns = 5; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins); size_t constexpr kRounds{ 100 }; for (size_t r = 0; r < kRounds; ++r) { auto new_sketch = DeviceSketch(0, dmat.get(), num_bins); ASSERT_EQ(reference_sketch.Values(), new_sketch.Values()); ASSERT_EQ(reference_sketch.MinValues(), 
new_sketch.MinValues()); } } TEST(HistUtil, DeviceSketchCategoricalAsNumeric) { int categorical_sizes[] = {2, 6, 8, 12}; int num_bins = 256; int sizes[] = {25, 100, 1000}; for (auto n : sizes) { for (auto num_categories : categorical_sizes) { auto x = GenerateRandomCategoricalSingleColumn(n, num_categories); auto dmat = GetDMatrixFromData(x, n, 1); auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } TEST(HistUtil, DeviceSketchCategoricalFeatures) { TestCategoricalSketch(1000, 256, 32, false, [](DMatrix *p_fmat, int32_t num_bins) { return DeviceSketch(0, p_fmat, num_bins); }); TestCategoricalSketch(1000, 256, 32, true, [](DMatrix *p_fmat, int32_t num_bins) { return DeviceSketch(0, p_fmat, num_bins); }); } void TestMixedSketch() { size_t n_samples = 1000, n_features = 2, n_categories = 3; std::vector<float> data(n_samples * n_features); SimpleLCG gen; SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)}; SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f}; for (size_t i = 0; i < n_samples * n_features; ++i) { if (i % 2 == 0) { data[i] = std::floor(cat_d(&gen)); } else { data[i] = num_d(&gen); } } auto m = GetDMatrixFromData(data, n_samples, n_features); m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical); m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical); auto cuts = DeviceSketch(0, m.get(), 64); ASSERT_EQ(cuts.Values().size(), 64 + n_categories); } TEST(HistUtil, DeviceSketchMixedFeatures) { TestMixedSketch(); } TEST(HistUtil, DeviceSketchMultipleColumns) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } TEST(HistUtil, 
DeviceSketchMultipleColumnsWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } TEST(HistUitl, DeviceSketchWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns); auto& h_weights = weighted_dmat->Info().weights_.HostVector(); h_weights.resize(num_rows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins); ASSERT_EQ(cuts.MinValues(), wcuts.MinValues()); ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs()); ASSERT_EQ(cuts.Values(), wcuts.Values()); ValidateCuts(cuts, dmat.get(), num_bins); ValidateCuts(wcuts, weighted_dmat.get(), num_bins); } } } TEST(HistUtil, DeviceSketchBatches) { int num_bins = 256; int num_rows = 5000; int batch_sizes[] = {0, 100, 1500, 6000}; int num_columns = 5; for (auto batch_size : batch_sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size); ValidateCuts(cuts, dmat.get(), num_bins); } num_rows = 1000; size_t batches = 16; auto x = GenerateRandom(num_rows * batches, num_columns); auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns); auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows); auto cuts = DeviceSketch(0, dmat.get(), num_bins, 
0); auto const& cut_values_batched = cuts_with_batches.Values(); auto const& cut_values = cuts.Values(); CHECK_EQ(cut_values.size(), cut_values_batched.size()); for (size_t i = 0; i < cut_values.size(); ++i) { ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5); } } TEST(HistUtil, DeviceSketchMultipleColumnsExternal) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns =5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); dmlc::TemporaryDirectory temp; auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } // See https://github.com/dmlc/xgboost/issues/5866. TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; dmlc::TemporaryDirectory temp; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, temp); dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows); for (auto num_bins : bin_sizes) { auto cuts = DeviceSketch(0, dmat.get(), num_bins); ValidateCuts(cuts, dmat.get(), num_bins); } } } template <typename Adapter> auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) { common::HistogramCuts batched_cuts; HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0); MetaInfo info; AdapterDeviceSketch(adapter.Value(), num_bins, info, missing, &sketch_container, batch_size); sketch_container.MakeCuts(&batched_cuts); return batched_cuts; } template <typename Adapter> void ValidateBatchedCuts(Adapter adapter, int num_bins, DMatrix* dmat, size_t batch_size = 0) { common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest( adapter, num_bins, 
std::numeric_limits<float>::quiet_NaN(), batch_size); ValidateCuts(batched_cuts, dmat, num_bins); } TEST(HistUtil, AdapterDeviceSketch) { int rows = 5; int cols = 1; int num_bins = 4; float missing = - 1.0; thrust::device_vector< float> data(rows*cols); auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data); data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 }; std::string str; Json::Dump(json_array_interface, &str); data::CupyAdapter adapter(str); auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing); Context ctx; auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing); EXPECT_EQ(device_cuts.Values(), host_cuts.Values()); EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs()); EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues()); } TEST(HistUtil, AdapterDeviceSketchMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN()); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); } TEST(HistUtil, AdapterSketchSlidingWindowMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); MetaInfo info; dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); common::HistogramCuts batched_cuts; HostDeviceVector<FeatureType> ft; 
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); HistogramCuts cuts; sketch_container.MakeCuts(&cuts); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, false); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95); ConsoleLogger::Configure({{"verbosity", "0"}}); } TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) { int num_columns = 100; int num_rows = 1000; int num_bins = 256; auto x = GenerateRandom(num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); MetaInfo info; auto& h_weights = info.weights_.HostVector(); h_weights.resize(num_rows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); dh::GlobalMemoryLogger().Clear(); ConsoleLogger::Configure({{"verbosity", "3"}}); common::HistogramCuts batched_cuts; HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); HistogramCuts cuts; sketch_container.MakeCuts(&cuts); ConsoleLogger::Configure({{"verbosity", "0"}}); size_t bytes_required = detail::RequiredMemory( num_rows, num_columns, num_rows * num_columns, num_bins, true); EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05); EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required); } void TestCategoricalSketchAdapter(size_t n, size_t num_categories, int32_t num_bins, bool weighted) { auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories); thrust::device_vector<float> x(h_x); auto adapter = AdapterFromData(x, n, 1); MetaInfo info; info.num_row_ = n; info.num_col_ = 1; 
info.feature_types.HostVector().push_back(FeatureType::kCategorical); if (weighted) { std::vector<float> weights(n, 0); SimpleLCG lcg; SimpleRealUniformDistribution<float> dist(0, 1); for (auto& v : weights) { v = dist(&lcg); } info.weights_.HostVector() = weights; } ASSERT_EQ(info.feature_types.Size(), 1); SketchContainer container(info.feature_types, num_bins, 1, n, 0); AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(), &container); HistogramCuts cuts; container.MakeCuts(&cuts); thrust::sort(x.begin(), x.end()); auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin(); ASSERT_NE(n_uniques, x.size()); ASSERT_EQ(cuts.TotalBins(), n_uniques); ASSERT_EQ(n_uniques, num_categories); auto& values = cuts.cut_values_.HostVector(); ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend())); auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques; ASSERT_TRUE(is_unique); x.resize(n_uniques); h_x.resize(n_uniques); thrust::copy(x.begin(), x.end(), h_x.begin()); for (decltype(n_uniques) i = 0; i < n_uniques; ++i) { ASSERT_EQ(h_x[i], values[i]); } } TEST(HistUtil, AdapterDeviceSketchCategorical) { int categorical_sizes[] = {2, 6, 8, 12}; int num_bins = 256; int sizes[] = {25, 100, 1000}; for (auto n : sizes) { for (auto num_categories : categorical_sizes) { auto x = GenerateRandomCategoricalSingleColumn(n, num_categories); auto dmat = GetDMatrixFromData(x, n, 1); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, n, 1); ValidateBatchedCuts(adapter, num_bins, dmat.get()); TestCategoricalSketchAdapter(n, num_categories, num_bins, true); TestCategoricalSketchAdapter(n, num_categories, num_bins, false); } } } TEST(HistUtil, AdapterDeviceSketchMultipleColumns) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = 
GetDMatrixFromData(x, num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); for (auto num_bins : bin_sizes) { auto adapter = AdapterFromData(x_device, num_rows, num_columns); ValidateBatchedCuts(adapter, num_bins, dmat.get()); } } } TEST(HistUtil, AdapterDeviceSketchBatches) { int num_bins = 256; int num_rows = 5000; int batch_sizes[] = {0, 100, 1500, 6000}; int num_columns = 5; for (auto batch_size : batch_sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); ValidateBatchedCuts(adapter, num_bins, dmat.get(), batch_size); } } namespace { auto MakeData(Context const* ctx, std::size_t n_samples, bst_feature_t n_features) { dh::safe_cuda(cudaSetDevice(ctx->gpu_id)); auto n = n_samples * n_features; std::vector<float> x; x.resize(n); std::iota(x.begin(), x.end(), 0); std::int32_t c{0}; float missing = n_samples * n_features; for (std::size_t i = 0; i < x.size(); ++i) { if (i % 5 == 0) { x[i] = missing; c++; } } thrust::device_vector<float> d_x; d_x = x; auto n_invalids = n / 10 * 2 + 1; auto is_valid = data::IsValidFunctor{missing}; return std::tuple{x, d_x, n_invalids, is_valid}; } void TestGetColumnSize(std::size_t n_samples) { auto ctx = MakeCUDACtx(0); bst_feature_t n_features = 12; [[maybe_unused]] auto [x, d_x, n_invalids, is_valid] = MakeData(&ctx, n_samples, n_features); auto adapter = AdapterFromData(d_x, n_samples, n_features); auto batch = adapter.Value(); auto batch_iter = dh::MakeTransformIterator<data::COOTuple>( thrust::make_counting_iterator(0llu), [=] __device__(std::size_t idx) { return batch.GetElement(idx); }); dh::caching_device_vector<std::size_t> column_sizes_scan; column_sizes_scan.resize(n_features + 1); std::vector<std::size_t> h_column_size(column_sizes_scan.size()); std::vector<std::size_t> h_column_size_1(column_sizes_scan.size()); 
detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, true>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size.begin()); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), true, false>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, true>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); detail::LaunchGetColumnSizeKernel<decltype(batch_iter), false, false>( ctx.gpu_id, IterSpan{batch_iter, batch.Size()}, is_valid, dh::ToSpan(column_sizes_scan)); thrust::copy(column_sizes_scan.begin(), column_sizes_scan.end(), h_column_size_1.begin()); ASSERT_EQ(h_column_size, h_column_size_1); } } // namespace TEST(HistUtil, GetColumnSize) { bst_row_t n_samples = 4096; TestGetColumnSize(n_samples); } // Check sketching from adapter or DMatrix results in the same answer // Consistency here is useful for testing and user experience TEST(HistUtil, SketchingEquivalent) { int bin_sizes[] = {2, 16, 256, 512}; int sizes[] = {100, 1000, 1500}; int num_columns = 5; for (auto num_rows : sizes) { auto x = GenerateRandom(num_rows, num_columns); auto dmat = GetDMatrixFromData(x, num_rows, num_columns); for (auto num_bins : bin_sizes) { auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins); auto x_device = thrust::device_vector<float>(x); auto adapter = AdapterFromData(x_device, num_rows, num_columns); common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest( adapter, num_bins, std::numeric_limits<float>::quiet_NaN()); EXPECT_EQ(dmat_cuts.Values(), 
adapter_cuts.Values()); EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs()); EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues()); ValidateBatchedCuts(adapter, num_bins, dmat.get()); } } } TEST(HistUtil, DeviceSketchFromGroupWeights) { size_t constexpr kRows = 3000, kCols = 200, kBins = 256; size_t constexpr kGroups = 10; auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(); auto& h_weights = m->Info().weights_.HostVector(); h_weights.resize(kRows); std::fill(h_weights.begin(), h_weights.end(), 1.0f); std::vector<bst_group_t> groups(kGroups); for (size_t i = 0; i < kGroups; ++i) { groups[i] = kRows / kGroups; } m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups); HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0); h_weights.clear(); HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0); ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size()); ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size()); ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size()); for (size_t i = 0; i < cuts.Values().size(); ++i) { EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i; } for (size_t i = 0; i < cuts.MinValues().size(); ++i) { ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]); } for (size_t i = 0; i < cuts.Ptrs().size(); ++i) { ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i)); } ValidateCuts(weighted_cuts, m.get(), kBins); } void TestAdapterSketchFromWeights(bool with_group) { size_t constexpr kRows = 300, kCols = 20, kBins = 256; size_t constexpr kGroups = 10; HostDeviceVector<float> storage; std::string m = RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface( &storage); MetaInfo info; Context ctx; auto& h_weights = info.weights_.HostVector(); if (with_group) { h_weights.resize(kGroups); } else { h_weights.resize(kRows); } std::fill(h_weights.begin(), h_weights.end(), 1.0f); std::vector<bst_group_t> groups(kGroups); if (with_group) { for (size_t i = 0; i < 
kGroups; ++i) { groups[i] = kRows / kGroups; } info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups); } info.weights_.SetDevice(0); info.num_row_ = kRows; info.num_col_ = kCols; data::CupyAdapter adapter(m); auto const& batch = adapter.Value(); HostDeviceVector<FeatureType> ft; SketchContainer sketch_container(ft, kBins, kCols, kRows, 0); AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); common::HistogramCuts cuts; sketch_container.MakeCuts(&cuts); auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols); if (with_group) { dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups); } dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size()); dmat->Info().num_col_ = kCols; dmat->Info().num_row_ = kRows; ASSERT_EQ(cuts.Ptrs().size(), kCols + 1); ValidateCuts(cuts, dmat.get(), kBins); if (with_group) { dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0); for (size_t i = 0; i < cuts.Values().size(); ++i) { ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]); } for (size_t i = 0; i < cuts.MinValues().size(); ++i) { ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]); } for (size_t i = 0; i < cuts.Ptrs().size(); ++i) { ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i)); } } if (with_group) { common::HistogramCuts weighted; auto& h_weights = info.weights_.HostVector(); h_weights.resize(kGroups); // Generate different weight. for (size_t i = 0; i < h_weights.size(); ++i) { // FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if // we use more diverse weights, partially caused by // https://github.com/dmlc/xgboost/issues/7946 h_weights[i] = (i % 2 == 0 ? 
1 : 2) / static_cast<float>(kGroups); } SketchContainer sketch_container(ft, kBins, kCols, kRows, 0); AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(), &sketch_container); sketch_container.MakeCuts(&weighted); ValidateCuts(weighted, dmat.get(), kBins); } } TEST(HistUtil, AdapterSketchFromWeights) { TestAdapterSketchFromWeights(false); TestAdapterSketchFromWeights(true); } } // namespace common } // namespace xgboost
814dffb0c488392eec896413619260ef67319975.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <iostream> #include "error.h" #include "cudaerror.h" using namespace std; const int N = 16384; // Num blocks const int M = 128; // Num threads per block __global__ void add(int *a, int *b, int *c) { int index = blockIdx.x*M + threadIdx.x; if (index < N) c[index] = a[index] + b[index]; } void fill_array(int *arr, int N, int val) { for(int i = 0; i < N; i++) arr[i] = val; } void print_arr(int *arr, int N){ for(int i = 0; i < N; i++){ cout << arr[i] << " "; if(i % 45 == 44) cout << endl; } cout << endl; } int main(void){ int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c int size = N*sizeof(int); // Allocate space for device copies of a, b, c ecudaMalloc((void **)&d_a, size); ecudaMalloc((void **)&d_b, size); ecudaMalloc((void **)&d_c, size); // Allocate for host copies of a, b, c, and setup input values a = (int *)emalloc(size); fill_array(a, N, 20); b = (int *)emalloc(size); fill_array(b, N, -15); c = (int *)emalloc(size); // Copy input to device ecudaMemcpy(d_a, a, size, hipMemcpyHostToDevice); ecudaMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Launch add() kernel on GPU hipLaunchKernelGGL(( add), dim3(N/M), dim3(M), 0, 0, d_a, d_b, d_c); // Copy result back to host ecudaMemcpy(c, d_c, size, hipMemcpyDeviceToHost); print_arr(c, N); // Cleanup free(a); free(b); free(c); ecudaFree(d_a); ecudaFree(d_b); ecudaFree(d_c); return 0; }
814dffb0c488392eec896413619260ef67319975.cu
#include <cstdio> #include <iostream> #include "error.h" #include "cudaerror.h" using namespace std; const int N = 16384; // Num blocks const int M = 128; // Num threads per block __global__ void add(int *a, int *b, int *c) { int index = blockIdx.x*M + threadIdx.x; if (index < N) c[index] = a[index] + b[index]; } void fill_array(int *arr, int N, int val) { for(int i = 0; i < N; i++) arr[i] = val; } void print_arr(int *arr, int N){ for(int i = 0; i < N; i++){ cout << arr[i] << " "; if(i % 45 == 44) cout << endl; } cout << endl; } int main(void){ int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c int size = N*sizeof(int); // Allocate space for device copies of a, b, c ecudaMalloc((void **)&d_a, size); ecudaMalloc((void **)&d_b, size); ecudaMalloc((void **)&d_c, size); // Allocate for host copies of a, b, c, and setup input values a = (int *)emalloc(size); fill_array(a, N, 20); b = (int *)emalloc(size); fill_array(b, N, -15); c = (int *)emalloc(size); // Copy input to device ecudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); ecudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // Launch add() kernel on GPU add<<<N/M, M>>>(d_a, d_b, d_c); // Copy result back to host ecudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); print_arr(c, N); // Cleanup free(a); free(b); free(c); ecudaFree(d_a); ecudaFree(d_b); ecudaFree(d_c); return 0; }
8369c12fca7c51f7cf174ea36c28c1082e4a94f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "additionally.h" #include "gpu.h" extern int gpu_index; #define BLOCK 512 void pull_batchnorm_layer(layer l) {} // not required now void push_batchnorm_layer(layer l) {} // not required now void pull_local_layer(local_layer l) {} // not required now void push_local_layer(local_layer l) {} // not required now void pull_connected_layer(local_layer l) {} // not required now void push_connected_layer(local_layer l) {} // not required now void check_error(hipError_t status) { //hipDeviceSynchronize(); hipError_t status2 = hipGetLastError(); if (status != hipSuccess) { const char *s = hipGetErrorString(status); char buffer[256]; printf("CUDA Error: %s\n", s); assert(0); snprintf(buffer, 256, "CUDA Error: %s", s); error(buffer); } if (status2 != hipSuccess) { const char *s = hipGetErrorString(status); char buffer[256]; printf("CUDA Error Prev: %s\n", s); assert(0); snprintf(buffer, 256, "CUDA Error Prev: %s", s); error(buffer); } } void cuda_set_device(int n) { gpu_index = n; hipError_t status = hipSetDevice(n); check_error(status); } int cuda_get_device() { int n = 0; hipError_t status = hipGetDevice(&n); check_error(status); return n; } #ifdef CUDNN cudnnHandle_t cudnn_handle() { static int init[16] = { 0 }; static cudnnHandle_t handle[16]; int i = cuda_get_device(); if (!init[i]) { cudnnCreate(&handle[i]); init[i] = 1; } return handle[i]; } #endif float *cuda_make_array(float *x, size_t n) { float *x_gpu; size_t size = sizeof(float)*n; hipError_t status = hipMalloc((void **)&x_gpu, size); check_error(status); if (x) { status = hipMemcpy(x_gpu, x, size, hipMemcpyHostToDevice); check_error(status); } if (!x_gpu) error("Cuda malloc failed\n"); return x_gpu; } int *cuda_make_int_array(size_t n) { int *x_gpu; size_t size = sizeof(int)*n; hipError_t status = 
hipMalloc((void **)&x_gpu, size); check_error(status); return x_gpu; } void cuda_free(float *x_gpu) { hipError_t status = hipFree(x_gpu); check_error(status); } void cuda_push_array(float *x_gpu, float *x, size_t n) { size_t size = sizeof(float)*n; hipError_t status = hipMemcpy(x_gpu, x, size, hipMemcpyHostToDevice); check_error(status); } void cuda_pull_array(float *x_gpu, float *x, size_t n) { size_t size = sizeof(float)*n; hipError_t status = hipMemcpy(x, x_gpu, size, hipMemcpyDeviceToHost); check_error(status); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } dim3 cuda_gridsize(size_t n) { size_t k = (n - 1) / BLOCK + 1; size_t x = k; size_t y = 1; if (x > 65535) { x = ceil(sqrtf(k)); y = (n - 1) / (x*BLOCK) + 1; } dim3 d; d.x = x; d.y = y; d.z = 1; //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK); return d; } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); if (layer.batch_normalize) { cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.biases_gpu, layer.biases, layer.n); if (layer.batch_normalize) { cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, 
layer.rolling_variance, layer.n); } } // -------------------- CUDA functions ------------------- // add BIAS __global__ void add_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if (offset < size) output[(batch*n + filter)*size + offset] += biases[filter]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size - 1) / BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); add_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size); check_error(hipPeekAtLastError()); } // normalization __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; x[index] = (x[index] - mean[f]) / (sqrtf(variance[f]) + .000001f); } void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel << <cuda_gridsize(N), BLOCK >> >(N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } // fill array __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i*INCX] = ALPHA; } void fill_ongpu(int N, float ALPHA, float * X, int INCX) { fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } // scale BIAS __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size - 1) / BLOCK + 1, n, batch); 
dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size); check_error(hipPeekAtLastError()); } // max-pool layer __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size) / stride + 1; int w = (in_w + pad - size) / stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for (l = 0; l < size; ++l) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } output[out_index] = max; indexes[out_index] = max_i; } void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state) { if (layer.stride == layer.size) { //if(1) { cudnnStatus_t maxpool_status; float alpha = 1, beta = 0; maxpool_status = cudnnPoolingForward( cudnn_handle(), layer.poolingDesc, &alpha, layer.srcTensorDesc, state.input, &beta, layer.dstTensorDesc, layer.output_gpu); //maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc); //cudnnDestroyTensorDescriptor(layer.srcTensorDesc); //cudnnDestroyTensorDescriptor(layer.dstTensorDesc); } else { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu); check_error(hipPeekAtLastError()); } } // flatten __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_s = i%spatial; i = i / spatial; int in_c = i%layers; i = i / layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } // activations __device__ float lhtan_activate_kernel(float x) { if (x < 0) return .001*x; if (x > 1) return .001*(x - 1) + 1; return x; } __device__ float lhtan_gradient_kernel(float x) { if (x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x) 
{ return x; } __device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); } __device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; } __device__ float relu_activate_kernel(float x) { return x*(x>0); } __device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); } __device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; } __device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; } __device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; } __device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); } __device__ float plse_activate_kernel(float x) { if (x < -4) return .01 * (x + 4); if (x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } __device__ float stair_activate_kernel(float x) { int n = floor(x); if (n % 2 == 0) return floor(x / 2.); else return (x - n) + floor(x / 2.); } __device__ float activate_kernel(float x, ACTIVATION a) { switch (a) { case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) x[i] = activate_kernel(x[i], a); } __global__ void activate_array_leaky_kernel(float *x, int n) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < n) { float val = x[index]; x[index] = (val > 0) ? 
val : val / 10; } } extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a) { if (a == LEAKY) activate_array_leaky_kernel << <(n / BLOCK + 1), BLOCK, 0, 0 >> >(x, n); else activate_array_kernel << <cuda_gridsize(n), BLOCK, 0, 0 >> >(x, n, a); check_error(hipPeekAtLastError()); } // softmax layer __device__ void softmax_device(int n, float *input, float temp, float *output) { int i; float sum = 0; float largest = -INFINITY; for (i = 0; i < n; ++i) { int val = input[i]; largest = (val>largest) ? val : largest; } for (i = 0; i < n; ++i) { float e = expf(input[i] / temp - largest / temp); sum += e; output[i] = e; } for (i = 0; i < n; ++i) { output[i] /= sum; } } __global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output) { int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (b >= batch) return; softmax_device(n, input + b*offset, temp, output + b*offset); } void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output) { int inputs = n; int batch = groups; softmax_kernel << <cuda_gridsize(batch), BLOCK >> >(inputs, offset, batch, input, temp, output); check_error(hipPeekAtLastError()); } // reorg layer __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_index = i; int in_w = i%w; i = i / w; int in_h = i%h; i = i / h; int in_c = i%c; i = i / c; int b = i%batch; int out_c = c / (stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); out[in_index] = x[out_index]; } void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch, 
stride, forward, out); check_error(hipPeekAtLastError()); } // upsample layer __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int out_index = i; int out_w = i % (w*stride); i = i / (w*stride); int out_h = i % (h*stride); i = i / (h*stride); int out_c = i%c; i = i / c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if (forward) out[out_index] += scale * x[in_index]; else atomicAdd(x + in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out); check_error(hipPeekAtLastError()); } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY) { copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY); } // shortcut layer __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = 
i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int stride = w1 / w2; int sample = w2 / w1; assert(stride == h1 / h2); assert(sample == h2 / h1); if (stride < 1) stride = 1; if (sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out); check_error(hipPeekAtLastError()); } // ----------- Quantinization -------------- __host__ __device__ int max_abs(int src, int max_val) { if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val; return src; } __global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign) } void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) { cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val); } __global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign) } void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) { cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler); } __global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < 
size) output_f32[idx] = input_int8[idx] * multipler; // 7-bit (1-bit sign) } void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) { cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler); } __global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) input_output[idx] = input_output[idx] * multipler; // 7-bit (1-bit sign) } void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) { cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler); } // -------------------------------- // ------------- XNOR ------------- // -------------------------------- __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for (i = 0; i < size; ++i) { mean += fabs(weights[f*size + i]); } mean = mean / size; for (i = 0; i < size; ++i) { binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary); check_error(hipPeekAtLastError()); } // -------------------------------- __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 
1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary); check_error(hipPeekAtLastError()); } // -------------------------------- void swap_binary(convolutional_layer *l) { float *swap = l->weights; l->weights = l->binary_weights; l->binary_weights = swap; #ifdef GPU swap = l->weights_gpu; l->weights_gpu = l->binary_weights_gpu; l->binary_weights_gpu = swap; #endif } // -------------------------------- #define WARP_SIZE 32 __global__ void im2col_align_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align) { int index = blockIdx.x*blockDim.x + threadIdx.x; for (; index < n; index += blockDim.x*gridDim.x) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; //data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; data_col_ptr += channel_out * bit_align + h_out * width_col + w_out; float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; //float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? 
data_im_ptr[i * width + j] : 0; //unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0); //if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask; //data_col_ptr_32 += bit_align / 32; //data_col_ptr += height_col * width_col; data_col_ptr += bit_align; } } } } void im2col_align_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col, int bit_align) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; im2col_align_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK, BLOCK, 0, 0>> >( num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col, bit_align); } // -------------------------------- // binary im2col - stride=1 __global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int channels, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align) { __shared__ float tmp_s[1]; __shared__ ulonglong4 tmp256_s[1]; //#define SHRED_VALS ((BLOCK / 169) * ) //__shared__ float dst_s[1024]; //__shared__ float dst_s[1024]; //__shared__ uint32_t bit_s[32]; //__shared__ uint8_t bit_s[128]; int index = blockIdx.x*blockDim.x + threadIdx.x; //for (; index < n; index += blockDim.x*gridDim.x) { int c_index = index; int channel_in = c_index % channels; //int h_out = index % height_col; //int c_index = index / height_col; //int channel_in = c_index % channels; int channel_out = channel_in * ksize * ksize; int j_index = c_index / channels; int j = j_index % ksize; int i = j_index / ksize; int pre_out_index = (channel_out + i*ksize + j) * bit_align; int j_pad = (j - pad); int i_pad = (i - pad); for (int wh_index = 
0; wh_index < (height_col*width_col); wh_index += 32) //for (int h_out = 0; h_out < height_col; ++h_out) { // the end of padding //if(0) //for (int w_out = 0; w_out < (width_col); w_out += 32) { const int w_out = wh_index % width_col; const int h_out = wh_index / width_col; const int w = w_out + j_pad; const int h = h_out + i_pad; int pre_in_index = channel_in * height * width; int pre_in_wh_index = h * width + w; int send_wh_index = wh_index; if (i >= ksize) send_wh_index = height_col*width_col; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int cur_wh_index = __shfl(send_wh_index, t) + lane_id; if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize) { const int cur_pre_out_index = __shfl(pre_out_index, t); const int cur_pre_in_index = __shfl(pre_in_index, t); const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id; int w = cur_pre_in_wh_index % width; int h = cur_pre_in_wh_index / width; int in_index = cur_pre_in_index + cur_pre_in_wh_index; int out_index = cur_pre_out_index + cur_wh_index; float val = (w >= 0 && w < width && h >= 0 && h < height) ? data_im[in_index] : float(); //data_col[out_index] = val; //tmp_s[0] = val; uint32_t bit_mask = __ballot(val > 0); if (lane_id == 0) { uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]); uint32_t *bit32_ptr = (uint32_t *)bit8_ptr; *bit32_ptr = bit_mask; } } } }// w_out } } } void im2col_align_bin_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col, int bit_align) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; //int num_kernels = channels * height_col * width_col * ksize * ksize; //int num_kernels = channels * ksize * ksize * height_col; int num_kernels = channels * ksize * ksize; int num_blocks = num_kernels / BLOCK + 1; //im2col_align_bin_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK, im2col_align_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( num_kernels, im, height, width, ksize, channels, pad, stride, height_col, width_col, data_col, bit_align); } // -------------------------------- __global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size) { //const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE); int index = blockIdx.x*blockDim.x + threadIdx.x; float src_val; //for (; index < size_aligned; index += blockDim.x*gridDim.x) { //src_val = src[index]; if (index < size) src_val = src[index]; else src_val = 0; //unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0); unsigned int bit_mask = __ballot(src_val > 0); if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask; } } void float_to_bit_gpu(float *src, unsigned char *dst, size_t size) { const int num_blocks = size / BLOCK + 1; float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size); } // -------------------------------- __device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) { size_t dst_i = index / 8; int dst_shift = index % 8; dst[dst_i] &= ~(1 << dst_shift); } __device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) { size_t dst_i = index / 8; int dst_shift = index % 8; dst[dst_i] |= 1 << dst_shift; //dst[dst_i] |= 1 << (8 - dst_shift); } __device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) { size_t src_i = index / 8; int src_shift = index % 8; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; 
//unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0; return val; } // Intel CPUs and nVidia CUDA GPU are little endian __device__ __host__ unsigned char reverse_byte(unsigned char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } __device__ unsigned char reverse_byte_CUDA(unsigned char a) { uint32_t tmp = __brev(a); return tmp >> 24; } __device__ __host__ unsigned char reverse_byte_2(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } __device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B) { unsigned x, y, t; // Load the array and pack it into x and y. x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x); B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y); } __global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; int index = blockIdx.x*blockDim.x + threadIdx.x; //for (i = 0; i < n; i += 8) { i = (index * 8) % n; int j; //for (j = 0; j < m - 8; j += 8) { j = ((index * 8) / n) * 8; if (j < m - 8) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose8rS32_reversed_diagonale(&A[a_index / 8], 
lda / 8, ldb / 8, &B[b_index / 8]); } else if (j < m) { for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); else remove_bit(B, j*ldb + i); } } } } } __device__ __host__ uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } __device__ uint32_t reverse_32_bit(uint32_t a) { // __device__ unsigned int __brev(unsigned int x) // CUDA // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return __brev(a); //return (reverse_8_bit(a >> 24) << 0) | // (reverse_8_bit(a >> 16) << 8) | // (reverse_8_bit(a >> 8) << 16) | // (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); __device__ void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } #define BLOCK_TRANSPOSE32 256 __device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { //unsigned A_tmp[32]; //int i; //#pragma unroll //for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; //transpose32_optimized(A_tmp); //#pragma unroll //for (i = 0; i < 32; ++i) 
B[i*n] = A_tmp[i]; __shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32]; uint32_t *A_tmp = &A_shared[32 * threadIdx.x]; int i; #pragma unroll 32 for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll 32 for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } // transpose 32x32 bit __global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32; //for (i = 0; i < n; i += 8) { i = index % n; int j; //for (j = 0; j < m - 8; j += 8) { j = (index / n) * 32; if (j < m) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); } } } } void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { size_t size = n*m / (8 * 8) + 1; size_t size32 = n*m / (32 * 32) + 1; const int num_blocks = size / BLOCK + 1; const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1; transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size); //transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size); } // -------------------------------- __global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < size) src[index] = 0; } void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size) { const int num_blocks = size / BLOCK + 1; fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size); } // -------------------------------- //typedef unsigned long long int uint64_t; //typedef unsigned int uint32_t; //typedef unsigned char uint8_t; //typedef char int8_t; __device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) { 
return (src > 0) ? 0xFFFFFFFFFFFFFFFF : 0; } __device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) { return ~(a^b) & 0b1; } __device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } __device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } __device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) { uint4 res; res.w = ~(a.w^b.w); res.x = ~(a.x^b.x); res.y = ~(a.y^b.y); res.z = ~(a.z^b.z); return res; } __device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) { ulonglong4 res; res.w = ~(a.w^b.w); res.x = ~(a.x^b.x); res.y = ~(a.y^b.y); res.z = ~(a.z^b.z); return res; } /* // A (weights) in the shared_memory __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`] //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`] int start_i = blockIdx.x*blockDim.x / N; int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1; size_t shared_size = lda * (end_i - start_i); int i_cur = index / N; int local_i = i_cur - start_i; for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) { int x = start_i*lda + k; if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8)); } //if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) { //for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] //*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights // } //} __syncthreads(); int i, j, k, h; j = index % N; { // out_h*out_w - one channel output size [169 - 173056] i = index / N; if (i < M) // l.n - filters [16 - 55 - 1024] { float mean_val = mean_arr[i]; int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size 
[27 - 9216] //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; } C[i*ldc + j] = (2 * count - K) * mean_val; } } } #include <cstdio> void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { size_t size = M*N; const int num_blocks = size / BLOCK + 1; gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr); } */ // -------------------------------- __inline__ __device__ int warpAllReduceSum(int val) { for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) val += __shfl_xor(val, mask); return val; } // Coalesced memory access // A (weights) in the shared_memory - GOOD __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint8_t A_s[6144 * 8 / 4]; //__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`] //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`] int start_i = blockIdx.x*blockDim.x / N; int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1; size_t shared_size = lda * (end_i - start_i); int i_cur = index / N; int local_i = i_cur - start_i; for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) { int x = start_i*lda + k; if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8)); } __syncthreads(); int i, j, k, h; j = index % N; { // out_h*out_w - one channel output size [169 - 173056] i = index / N; //if (i < M) // l.n - filters [16 - 
55 - 1024] { int count = 0; k = 0; #ifdef NOT_USED // 32 thread X 256 bit = 8192 bit for (; k < (K - 8192); k += 8192) { // l.size*l.size*l.c - one filter size [27 - 9216] ulonglong4 c_bit256; //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 32 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 32 * lane_id; { //ulonglong4 a_bit256 = *((ulonglong4 *)(A + A_i)); // weights ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + A_i)); // weights ulonglong4 b_bit256 = *((ulonglong4 *)(B + B_i)); // input c_bit256 = xnor_int256(a_bit256, b_bit256); int tmp_count = __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } #endif //#ifdef NOT_USED // 32 thread X 64 bit = 2048 bit for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t c_bit64; //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id; { //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif //#ifdef NOT_USED // 32 thread X 32 bit = 1024 bit for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 
9216] //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id; { //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); int tmp_count = __popc(c_bit32); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif if (i < M) { float mean_val = mean_arr[i]; float bias_val = bias_arr[i]; //#ifdef NOT_USED for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] //ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256); count += __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); } //#endif #ifdef NOT_USED for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); count += __popcll(c_bit64); } #endif const int bit_step = 256; int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) *mean_val + bias_val; } } } } /* // Coalescing // B (input) in the shared_memory - GOOD __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits //__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits int start_j = blockIdx.x*blockDim.x / M; int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1; size_t shared_size = ldb * (end_j - start_j); int j_cur = index / M; int local_j = j_cur - start_j; for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) { int x = start_j*ldb + k; if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8)); } __syncthreads(); int i, j, k; i = index % M; // l.n - filters [16 - 55 - 1024] { j = index / M; // out_h*out_w - one channel output size [169 - 173056] if (j < N) { int count = 0; k = 0; //#ifdef NOT_USED // 32 thread X 64 bit = 2048 bit for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t c_bit64; int64_t A_cur_index = (i*lda + k) / 8; //int64_t B_cur_index = (j*ldb + k) / 8; int64_t B_cur_index = (local_j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id; { uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights //uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); int sum_count = 
warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif //#ifdef NOT_USED // 32 thread X 32 bit = 1024 bit for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216] int64_t A_cur_index = (i*lda + k) / 8; //int64_t B_cur_index = (j*ldb + k) / 8; int64_t B_cur_index = (local_j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id; { uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights //uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); int tmp_count = __popc(c_bit32); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif if (i < M) { float mean_val = mean_arr[i]; float bias_val = bias_arr[i]; //#ifdef NOT_USED for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights //ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256); count += __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); } //#endif #ifdef NOT_USED for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights //uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); count += __popcll(c_bit64); } #endif const int bit_step = 256; int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val + bias_val; } } } } */ // Coalesced memory access - GOOD void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias) { size_t size = M*N; const int num_blocks = size / BLOCK + 1; /* printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n", size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024); printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512); */ //printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda); gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr, bias); } // -------------------------------- // -------------------------------- // -------------------------------- // sequentially - B (input) in the shared_memory - BAD // -------------------------------- __global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { //__shared__ float mean_shared[32]; //__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits //__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits __shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits const int K_items = WARP_SIZE; int start_j = blockIdx.x*blockDim.x / (K_items * M); { int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1; if (end_j > N) end_j = N; size_t shared_size = ldb * (end_j - start_j); if (shared_size != 0) { //if(threadIdx.x == 0) printf(" 
start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size); int k; for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) { int x = start_j*ldb + k; if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8)); } } } __syncthreads(); int index = blockIdx.x*blockDim.x + threadIdx.x; { int i; // l.n int j; // out_h*out_w int k; // l.size * l.size * l.c const int index2 = index / K_items; i = index2 % M; // max M j = index2 / M; // max N int local_j = j - start_j; //if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n", // k, K, K_items, i, j, lda, ldb, ldc); { // l.n - filters [16 - 55 - 1024] // further improvements: for (l.n == 1024) iterate several (j) if (j < N) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; const int bit_step = 32; for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights //uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); count += __popc(c_bit32); } for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) count += __shfl_down(count, offset); if (threadIdx.x % WARP_SIZE == 0) { int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; float mean_val = mean_arr[i]; C[i*ldc + j] = (2 * count - K) * mean_val; //B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val; } } } } } // sequentially - BAD void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { //size_t size = M*N; size_t size = M*N * 32; const int num_blocks = size / BLOCK + 1; //printf(" K = %d \n", K); /* printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n", size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024); printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512); */ //printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda); gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr); } // --------------------------------
8369c12fca7c51f7cf174ea36c28c1082e4a94f5.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "additionally.h" #include "gpu.h" extern int gpu_index; #define BLOCK 512 void pull_batchnorm_layer(layer l) {} // not required now void push_batchnorm_layer(layer l) {} // not required now void pull_local_layer(local_layer l) {} // not required now void push_local_layer(local_layer l) {} // not required now void pull_connected_layer(local_layer l) {} // not required now void push_connected_layer(local_layer l) {} // not required now void check_error(cudaError_t status) { //cudaDeviceSynchronize(); cudaError_t status2 = cudaGetLastError(); if (status != cudaSuccess) { const char *s = cudaGetErrorString(status); char buffer[256]; printf("CUDA Error: %s\n", s); assert(0); snprintf(buffer, 256, "CUDA Error: %s", s); error(buffer); } if (status2 != cudaSuccess) { const char *s = cudaGetErrorString(status); char buffer[256]; printf("CUDA Error Prev: %s\n", s); assert(0); snprintf(buffer, 256, "CUDA Error Prev: %s", s); error(buffer); } } void cuda_set_device(int n) { gpu_index = n; cudaError_t status = cudaSetDevice(n); check_error(status); } int cuda_get_device() { int n = 0; cudaError_t status = cudaGetDevice(&n); check_error(status); return n; } #ifdef CUDNN cudnnHandle_t cudnn_handle() { static int init[16] = { 0 }; static cudnnHandle_t handle[16]; int i = cuda_get_device(); if (!init[i]) { cudnnCreate(&handle[i]); init[i] = 1; } return handle[i]; } #endif float *cuda_make_array(float *x, size_t n) { float *x_gpu; size_t size = sizeof(float)*n; cudaError_t status = cudaMalloc((void **)&x_gpu, size); check_error(status); if (x) { status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); check_error(status); } if (!x_gpu) error("Cuda malloc failed\n"); return x_gpu; } int *cuda_make_int_array(size_t n) { int *x_gpu; size_t size = sizeof(int)*n; cudaError_t status = cudaMalloc((void **)&x_gpu, size); check_error(status); return 
x_gpu; } void cuda_free(float *x_gpu) { cudaError_t status = cudaFree(x_gpu); check_error(status); } void cuda_push_array(float *x_gpu, float *x, size_t n) { size_t size = sizeof(float)*n; cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); check_error(status); } void cuda_pull_array(float *x_gpu, float *x, size_t n) { size_t size = sizeof(float)*n; cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost); check_error(status); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } dim3 cuda_gridsize(size_t n) { size_t k = (n - 1) / BLOCK + 1; size_t x = k; size_t y = 1; if (x > 65535) { x = ceil(sqrtf(k)); y = (n - 1) / (x*BLOCK) + 1; } dim3 d; d.x = x; d.y = y; d.z = 1; //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK); return d; } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); if (layer.batch_normalize) { cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.biases_gpu, layer.biases, layer.n); if (layer.batch_normalize) { cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } } // -------------------- 
CUDA functions ------------------- // add BIAS __global__ void add_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if (offset < size) output[(batch*n + filter)*size + offset] += biases[filter]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size - 1) / BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); add_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size); check_error(cudaPeekAtLastError()); } // normalization __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; x[index] = (x[index] - mean[f]) / (sqrtf(variance[f]) + .000001f); } void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel << <cuda_gridsize(N), BLOCK >> >(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } // fill array __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i*INCX] = ALPHA; } void fill_ongpu(int N, float ALPHA, float * X, int INCX) { fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } // scale BIAS __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size - 1) / BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel << <dimGrid, 
dimBlock >> >(output, biases, n, size); check_error(cudaPeekAtLastError()); } // max-pool layer __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size) / stride + 1; int w = (in_w + pad - size) / stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for (l = 0; l < size; ++l) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? 
val : max; } } output[out_index] = max; indexes[out_index] = max_i; } void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state) { if (layer.stride == layer.size) { //if(1) { cudnnStatus_t maxpool_status; float alpha = 1, beta = 0; maxpool_status = cudnnPoolingForward( cudnn_handle(), layer.poolingDesc, &alpha, layer.srcTensorDesc, state.input, &beta, layer.dstTensorDesc, layer.output_gpu); //maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc); //cudnnDestroyTensorDescriptor(layer.srcTensorDesc); //cudnnDestroyTensorDescriptor(layer.dstTensorDesc); } else { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu); check_error(cudaPeekAtLastError()); } } // flatten __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_s = i%spatial; i = i / spatial; int in_c = i%layers; i = i / layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } // activations __device__ float lhtan_activate_kernel(float x) { if (x < 0) return .001*x; if (x > 1) return .001*(x - 1) + 1; return x; } __device__ float lhtan_gradient_kernel(float x) { if (x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float 
x) { return x; } __device__ float logistic_activate_kernel(float x) { return 1. / (1. + exp(-x)); } __device__ float loggy_activate_kernel(float x) { return 2. / (1. + exp(-x)) - 1; } __device__ float relu_activate_kernel(float x) { return x*(x>0); } __device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(exp(x) - 1); } __device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01*x; } __device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1*x; } __device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1*x; } __device__ float tanh_activate_kernel(float x) { return (2 / (1 + exp(-2 * x)) - 1); } __device__ float plse_activate_kernel(float x) { if (x < -4) return .01 * (x + 4); if (x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } __device__ float stair_activate_kernel(float x) { int n = floor(x); if (n % 2 == 0) return floor(x / 2.); else return (x - n) + floor(x / 2.); } __device__ float activate_kernel(float x, ACTIVATION a) { switch (a) { case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < n) x[i] = activate_kernel(x[i], a); } __global__ void activate_array_leaky_kernel(float *x, int n) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < n) { float val = x[index]; x[index] = (val > 0) ? 
val : val / 10; }
}

// Host entry: apply activation `a` elementwise to x[0..n).  LEAKY gets a
// dedicated kernel; everything else dispatches through activate_kernel.
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    if (a == LEAKY) activate_array_leaky_kernel << <(n / BLOCK + 1), BLOCK, 0, 0 >> >(x, n);
    else activate_array_kernel << <cuda_gridsize(n), BLOCK, 0, 0 >> >(x, n, a);
    check_error(cudaPeekAtLastError());
}

// softmax layer

// Numerically-stable softmax over n values with temperature `temp`:
// subtracts the running maximum before exponentiating so expf cannot
// overflow for large activations.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // BUG FIX: was `int val = input[i];` — truncating each input to int
        // (e.g. 0.9 -> 0) corrupted the running maximum and defeated the
        // max-subtraction stabilization below.
        float val = input[i];
        largest = (val>largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}

// One thread per batch row: softmax over `n` values at offset b*offset.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (b >= batch) return;
    softmax_device(n, input + b*offset, temp, output + b*offset);
}

// Host wrapper: one independent softmax per group (batch row).
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
    int inputs = n;
    int batch = groups;
    softmax_kernel << <cuda_gridsize(batch), BLOCK >> >(inputs, offset, batch, input, temp, output);
    check_error(cudaPeekAtLastError());
}

// reorg layer

// Space-to-depth shuffle used by the reorg layer.
// NOTE(review): the `forward` parameter is unused — the kernel always
// performs the same gather (out[in_index] = x[out_index]); confirm against
// the CPU reorg implementation before relying on `forward` here.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int in_index = i;
    int in_w = i%w;
    i = i / w;
    int in_h = i%h;
    i = i / h;
    int in_c = i%c;
    i = i / c;
    int b = i%batch;

    int out_c = c / (stride*stride);

    int c2 = in_c % out_c;
    int offset = in_c / out_c;
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;
    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
    out[in_index] = x[out_index];
}

// Host wrapper: one thread per w*h*c*batch element (continues in the next
// chunk).
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int size = w*h*c*batch;
    reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch,
stride, forward, out);
    check_error(cudaPeekAtLastError());
}

// upsample layer

// Nearest-neighbor upsample by `stride`.
// forward: out[...] += scale * x[nearest]; backward (forward==0): scatter
// gradients back into x with atomicAdd, since stride^2 output cells map to
// each input cell.
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    // FIX: widen to size_t before the multiply — the previous 32-bit
    // blockIdx/blockDim product could wrap for grids covering >= 2^32
    // elements even though `i` itself is size_t.
    size_t i = (size_t)(blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int out_index = i;
    int out_w = i % (w*stride);
    i = i / (w*stride);
    int out_h = i % (h*stride);
    i = i / (h*stride);
    int out_c = i%c;
    i = i / c;
    int b = i%batch;

    int in_w = out_w / stride;
    int in_h = out_h / stride;
    int in_c = out_c;

    int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;

    if (forward) out[out_index] += scale * x[in_index];
    else atomicAdd(x + in_index, scale * out[out_index]);
}

// Host wrapper: one thread per output element (w*h*c*batch*stride^2).
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    // FIX: cast before multiplying — the product was previously evaluated
    // entirely in 32-bit int and could overflow before the size_t store.
    size_t size = (size_t)w*h*c*batch*stride*stride;
    upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
    check_error(cudaPeekAtLastError());
}

// Strided copy: Y[i*INCY + OFFY] = X[i*INCX + OFFX] for i in [0, N).
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}

// Host wrapper for copy_kernel with explicit offsets.
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
    copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
    check_error(cudaPeekAtLastError());
}

// Zero-offset convenience form of copy_ongpu_offset.
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
    copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}

// shortcut layer

// Element-wise residual add between feature maps of different sizes:
// stride subsamples the larger `add` side, sample the larger `out` side
// (definition continues in the next chunk).
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;

    int
out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] += add[add_index];
}

// Host entry for the shortcut (residual) add.
// stride = w1/w2 downsamples the `add` side; sample = w2/w1 upsamples it.
// NOTE(review): the asserts run before the <1 clamps — equal sizes give
// ratio 1 on both axes and pass; mismatched aspect ratios would trip them.
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int stride = w1 / w2;
    int sample = w2 / w1;
    assert(stride == h1 / h2);
    assert(sample == h2 / h1);
    if (stride < 1) stride = 1;
    if (sample < 1) sample = 1;

    int size = batch * minw * minh * minc;
    shortcut_kernel << <cuda_gridsize(size), BLOCK>> >(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
    check_error(cudaPeekAtLastError());
}

// ----------- Quantinization --------------

// Clamp `src` into [-max_val, max_val] by magnitude, preserving sign.
__host__ __device__ int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// Quantize float -> int8 with scaling and symmetric clamping.
// The float->int conversion at the max_abs call site truncates toward zero.
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}

// Host wrapper for cuda_f32_to_int8.
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}

// Quantize float -> int8 with scaling but no clamping; relies on the
// implicit float->int8 conversion for out-of-range values.
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
}

// Host wrapper for cuda_f32_to_int8_nomax.
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}

// Dequantize int8 -> float (definition continues in the next chunk).
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = input_int8[idx] * multipler; // 7-bit (1-bit sign)
}

// Host wrapper for cuda_int8_to_f32.
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler);
}

// In-place elementwise scale of a float buffer by `multipler`.
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) input_output[idx] = input_output[idx] * multipler; // 7-bit (1-bit sign)
}

// Host wrapper for cuda_multiply_f32.
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler)
{
    cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler);
}

// --------------------------------
// ------------- XNOR -------------
// --------------------------------

// One thread per filter f: binarize its `size` weights to +/-mean(|w|),
// keeping each weight's sign (XNOR-Net style scaling).
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    int i = 0;
    float mean = 0;
    for (i = 0; i < size; ++i) {
        mean += fabs(weights[f*size + i]);
    }
    mean = mean / size;
    for (i = 0; i < size; ++i) {
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
        //binary[f*size + i] = weights[f*size + i];
    }
}

// Host wrapper: one thread per filter.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}

// --------------------------------

// Elementwise sign binarization to +/-1 (definition continues in the next
// chunk).
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= n) return;
    binary[i] = (x[i] >= 0) ?
1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary); check_error(cudaPeekAtLastError()); } // -------------------------------- void swap_binary(convolutional_layer *l) { float *swap = l->weights; l->weights = l->binary_weights; l->binary_weights = swap; #ifdef GPU swap = l->weights_gpu; l->weights_gpu = l->binary_weights_gpu; l->binary_weights_gpu = swap; #endif } // -------------------------------- #define WARP_SIZE 32 __global__ void im2col_align_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align) { int index = blockIdx.x*blockDim.x + threadIdx.x; for (; index < n; index += blockDim.x*gridDim.x) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; //data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; data_col_ptr += channel_out * bit_align + h_out * width_col + w_out; float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; //float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? 
data_im_ptr[i * width + j] : 0; //unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0); //if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask; //data_col_ptr_32 += bit_align / 32; //data_col_ptr += height_col * width_col; data_col_ptr += bit_align; } } } } void im2col_align_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col, int bit_align) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; im2col_align_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK, BLOCK, 0, 0>> >( num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col, bit_align); } // -------------------------------- // binary im2col - stride=1 __global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int channels, const int pad, const int stride, const int height_col, const int width_col, float *data_col, const int bit_align) { __shared__ float tmp_s[1]; __shared__ ulonglong4 tmp256_s[1]; //#define SHRED_VALS ((BLOCK / 169) * ) //__shared__ float dst_s[1024]; //__shared__ float dst_s[1024]; //__shared__ uint32_t bit_s[32]; //__shared__ uint8_t bit_s[128]; int index = blockIdx.x*blockDim.x + threadIdx.x; //for (; index < n; index += blockDim.x*gridDim.x) { int c_index = index; int channel_in = c_index % channels; //int h_out = index % height_col; //int c_index = index / height_col; //int channel_in = c_index % channels; int channel_out = channel_in * ksize * ksize; int j_index = c_index / channels; int j = j_index % ksize; int i = j_index / ksize; int pre_out_index = (channel_out + i*ksize + j) * bit_align; int j_pad = (j - pad); int i_pad = (i - pad); for (int wh_index = 
0; wh_index < (height_col*width_col); wh_index += 32) //for (int h_out = 0; h_out < height_col; ++h_out) { // the end of padding //if(0) //for (int w_out = 0; w_out < (width_col); w_out += 32) { const int w_out = wh_index % width_col; const int h_out = wh_index / width_col; const int w = w_out + j_pad; const int h = h_out + i_pad; int pre_in_index = channel_in * height * width; int pre_in_wh_index = h * width + w; int send_wh_index = wh_index; if (i >= ksize) send_wh_index = height_col*width_col; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int cur_wh_index = __shfl(send_wh_index, t) + lane_id; if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize) { const int cur_pre_out_index = __shfl(pre_out_index, t); const int cur_pre_in_index = __shfl(pre_in_index, t); const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id; int w = cur_pre_in_wh_index % width; int h = cur_pre_in_wh_index / width; int in_index = cur_pre_in_index + cur_pre_in_wh_index; int out_index = cur_pre_out_index + cur_wh_index; float val = (w >= 0 && w < width && h >= 0 && h < height) ? data_im[in_index] : float(); //data_col[out_index] = val; //tmp_s[0] = val; uint32_t bit_mask = __ballot(val > 0); if (lane_id == 0) { uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]); uint32_t *bit32_ptr = (uint32_t *)bit8_ptr; *bit32_ptr = bit_mask; } } } }// w_out } } } void im2col_align_bin_ongpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col, int bit_align) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    //int num_kernels = channels * height_col * width_col * ksize * ksize;
    //int num_kernels = channels * ksize * ksize * height_col;
    // One thread per (channel, ky, kx) tap; each thread sweeps the whole
    // output plane inside the kernel.
    int num_kernels = channels * ksize * ksize;
    int num_blocks = num_kernels / BLOCK + 1;

    //im2col_align_bin_gpu_kernel << <(num_kernels + BLOCK - 1) / BLOCK,
    im2col_align_bin_gpu_kernel << <num_blocks,
        BLOCK, 0, 0 >> >(
            num_kernels, im, height, width, ksize, channels, pad,
            stride, height_col,
            width_col, data_col, bit_align);
}
// --------------------------------

// Pack floats into a bitmask: bit i = (src[i] > 0).  A warp ballot turns 32
// consecutive floats into one 32-bit word; lane 0 of each warp stores it.
// NOTE: legacy mask-less __ballot (pre-Volta / HIP style intrinsic).
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
    //const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);

    int index = blockIdx.x*blockDim.x + threadIdx.x;
    float src_val;

    //for (; index < size_aligned; index += blockDim.x*gridDim.x)
    {
        //src_val = src[index];
        // Out-of-range lanes contribute a 0 bit so the tail word is defined.
        if (index < size) src_val = src[index];
        else src_val = 0;
        //unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
        unsigned int bit_mask = __ballot(src_val > 0);
        if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
    }
}

// Host wrapper for float_to_bit_gpu_kernel.
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------

// Clear bit `index` in the byte array dst (LSB-first within each byte).
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
    size_t dst_i = index / 8;
    int dst_shift = index % 8;
    dst[dst_i] &= ~(1 << dst_shift);
}

// Set bit `index` in the byte array dst (LSB-first within each byte).
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
    size_t dst_i = index / 8;
    int dst_shift = index % 8;
    dst[dst_i] |= 1 << dst_shift;
    //dst[dst_i] |= 1 << (8 - dst_shift);
}

// Read bit `index` from the byte array src (continues in the next chunk).
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
    size_t src_i = index / 8;
    int src_shift = index % 8;
    unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
//unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0; return val; } // Intel CPUs and nVidia CUDA GPU are little endian __device__ __host__ unsigned char reverse_byte(unsigned char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } __device__ unsigned char reverse_byte_CUDA(unsigned char a) { uint32_t tmp = __brev(a); return tmp >> 24; } __device__ __host__ unsigned char reverse_byte_2(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } __device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B) { unsigned x, y, t; // Load the array and pack it into x and y. x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x); B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y); } __global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; int index = blockIdx.x*blockDim.x + threadIdx.x; //for (i = 0; i < n; i += 8) { i = (index * 8) % n; int j; //for (j = 0; j < m - 8; j += 8) { j = ((index * 8) / n) * 8; if (j < m - 8) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose8rS32_reversed_diagonale(&A[a_index / 8], 
lda / 8, ldb / 8, &B[b_index / 8]); } else if (j < m) { for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); else remove_bit(B, j*ldb + i); } } } } } __device__ __host__ uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } __device__ uint32_t reverse_32_bit(uint32_t a) { // __device__ ​ unsigned int __brev(unsigned int x) // CUDA // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return __brev(a); //return (reverse_8_bit(a >> 24) << 0) | // (reverse_8_bit(a >> 16) << 8) | // (reverse_8_bit(a >> 8) << 16) | // (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); __device__ void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } #define BLOCK_TRANSPOSE32 256 __device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { //unsigned A_tmp[32]; //int i; //#pragma unroll //for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; //transpose32_optimized(A_tmp); //#pragma unroll //for (i = 0; i < 32; ++i) 
B[i*n] = A_tmp[i];

    __shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
    uint32_t *A_tmp = &A_shared[32 * threadIdx.x];

    int i;
    #pragma unroll 32
    for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
    transpose32_optimized(A_tmp);
    #pragma unroll 32
    for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}

// transpose 32x32 bit

// Bit-matrix transpose: each thread handles one 32x32 bit tile of the
// n x m bit matrix A (row stride lda bits) into B (row stride ldb bits).
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;

    //for (i = 0; i < n; i += 8)
    {
        i = index % n;
        int j;
        //for (j = 0; j < m - 8; j += 8)
        {
            j = (index / n) * 32;
            if (j < m) {
                int a_index = i*lda + j;
                int b_index = j*ldb + i;
                transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
            }
        }
    }
}

// Host wrapper: launches the 32x32 bit-transpose over the whole bit matrix.
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    size_t size = n*m / (8 * 8) + 1;
    size_t size32 = n*m / (32 * 32) + 1;
    const int num_blocks = size / BLOCK + 1;
    const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
    transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
    //transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------

// Fill `size` bytes of device memory with `val`.
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: was `src[index] = 0;`, which silently ignored the `val`
    // parameter and always zero-filled regardless of the caller's request.
    // (Behavior is unchanged for the existing val == 0 call sites.)
    if (index < size) src[index] = val;
}

// Host wrapper for fill_int8_gpu_kernel.
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------

//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;

// Broadcast a single bit to all 64 bits (continues in the next chunk).
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
return (src > 0) ? 0xFFFFFFFFFFFFFFFF : 0; } __device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) { return ~(a^b) & 0b1; } __device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } __device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } __device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) { uint4 res; res.w = ~(a.w^b.w); res.x = ~(a.x^b.x); res.y = ~(a.y^b.y); res.z = ~(a.z^b.z); return res; } __device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) { ulonglong4 res; res.w = ~(a.w^b.w); res.x = ~(a.x^b.x); res.y = ~(a.y^b.y); res.z = ~(a.z^b.z); return res; } /* // A (weights) in the shared_memory __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`] //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`] int start_i = blockIdx.x*blockDim.x / N; int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1; size_t shared_size = lda * (end_i - start_i); int i_cur = index / N; int local_i = i_cur - start_i; for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) { int x = start_i*lda + k; if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8)); } //if (i_cur < M && (index % N == 0 || threadIdx.x == 0)) { //for (int k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] //*((uint64_t *)(A_s + (local_i*lda + k) / 8)) = *((uint64_t *)(A + (i_cur*lda + k) / 8)); // weights // } //} __syncthreads(); int i, j, k, h; j = index % N; { // out_h*out_w - one channel output size [169 - 173056] i = index / N; if (i < M) // l.n - filters [16 - 55 - 1024] { float mean_val = mean_arr[i]; int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size 
[27 - 9216] //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; } C[i*ldc + j] = (2 * count - K) * mean_val; } } } #include <cstdio> void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { size_t size = M*N; const int num_blocks = size / BLOCK + 1; gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr); } */ // -------------------------------- __inline__ __device__ int warpAllReduceSum(int val) { for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) val += __shfl_xor(val, mask); return val; } // Coalesced memory access // A (weights) in the shared_memory - GOOD __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint8_t A_s[6144 * 8 / 4]; //__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`] //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`] int start_i = blockIdx.x*blockDim.x / N; int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1; size_t shared_size = lda * (end_i - start_i); int i_cur = index / N; int local_i = i_cur - start_i; for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) { int x = start_i*lda + k; if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8)); } __syncthreads(); int i, j, k, h; j = index % N; { // out_h*out_w - one channel output size [169 - 173056] i = index / N; //if (i < M) // l.n - filters [16 - 
55 - 1024] { int count = 0; k = 0; #ifdef NOT_USED // 32 thread X 256 bit = 8192 bit for (; k < (K - 8192); k += 8192) { // l.size*l.size*l.c - one filter size [27 - 9216] ulonglong4 c_bit256; //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 32 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 32 * lane_id; { //ulonglong4 a_bit256 = *((ulonglong4 *)(A + A_i)); // weights ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + A_i)); // weights ulonglong4 b_bit256 = *((ulonglong4 *)(B + B_i)); // input c_bit256 = xnor_int256(a_bit256, b_bit256); int tmp_count = __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } #endif //#ifdef NOT_USED // 32 thread X 64 bit = 2048 bit for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t c_bit64; //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id; { //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif //#ifdef NOT_USED // 32 thread X 32 bit = 1024 bit for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 
9216] //int64_t A_cur_index = (i*lda + k) / 8; int64_t A_cur_index = (local_i*lda + k) / 8; int64_t B_cur_index = (j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id; { //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); int tmp_count = __popc(c_bit32); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif if (i < M) { float mean_val = mean_arr[i]; float bias_val = bias_arr[i]; //#ifdef NOT_USED for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] //ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256); count += __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); } //#endif #ifdef NOT_USED for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); count += __popcll(c_bit64); } #endif const int bit_step = 256; int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) *mean_val + bias_val; } } } } /* // Coalescing // B (input) in the shared_memory - GOOD __global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias_arr) { int index = blockIdx.x*blockDim.x + threadIdx.x; __shared__ uint8_t B_s[4096*8]; // 32 KB // [ldb x N`] // max = 262 144 bits //__shared__ uint64_t B_s[4096]; // 32 KB // [ldb x N`] // max = 262 144 bits int start_j = blockIdx.x*blockDim.x / M; int end_j = (blockIdx.x*blockDim.x + blockDim.x) / M + 1; size_t shared_size = ldb * (end_j - start_j); int j_cur = index / M; int local_j = j_cur - start_j; for (int k = threadIdx.x * 256; k < shared_size; k += blockDim.x * 256) { int x = start_j*ldb + k; if (x < (N*ldb)) *((ulonglong4 *)(B_s + k / 8)) = *((ulonglong4 *)(B + x / 8)); } __syncthreads(); int i, j, k; i = index % M; // l.n - filters [16 - 55 - 1024] { j = index / M; // out_h*out_w - one channel output size [169 - 173056] if (j < N) { int count = 0; k = 0; //#ifdef NOT_USED // 32 thread X 64 bit = 2048 bit for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t c_bit64; int64_t A_cur_index = (i*lda + k) / 8; //int64_t B_cur_index = (j*ldb + k) / 8; int64_t B_cur_index = (local_j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 8 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 8 * lane_id; { uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights //uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input uint64_t b_bit64 = *((uint64_t *)(B_s + B_i)); // input c_bit64 = xnor_int64(a_bit64, b_bit64); int tmp_count = __popcll(c_bit64); int sum_count = 
warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif //#ifdef NOT_USED // 32 thread X 32 bit = 1024 bit for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216] int64_t A_cur_index = (i*lda + k) / 8; //int64_t B_cur_index = (j*ldb + k) / 8; int64_t B_cur_index = (local_j*ldb + k) / 8; if (i >= M) A_cur_index = 0; #pragma unroll for (int t = 0; t < WARP_SIZE; ++t) { const int lane_id = threadIdx.x % WARP_SIZE; const int64_t A_i = __shfl(A_cur_index, t) + 4 * lane_id; const int64_t B_i = __shfl(B_cur_index, t) + 4 * lane_id; { uint32_t a_bit32 = *((uint32_t *)(A + A_i)); // weights //uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input uint32_t b_bit32 = *((uint32_t *)(B_s + B_i)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); int tmp_count = __popc(c_bit32); int sum_count = warpAllReduceSum(tmp_count); if (lane_id == t) count += sum_count; } } } //#endif if (i < M) { float mean_val = mean_arr[i]; float bias_val = bias_arr[i]; //#ifdef NOT_USED for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights //ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input ulonglong4 b_bit256 = *((ulonglong4 *)(B_s + (local_j*ldb + k) / 8)); // input ulonglong4 c_bit256 = xnor_int256(a_bit256, b_bit256); count += __popcll(c_bit256.w) + __popcll(c_bit256.x) + __popcll(c_bit256.y) + __popcll(c_bit256.z); } //#endif #ifdef NOT_USED for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights //uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input uint64_t b_bit64 = *((uint64_t *)(B_s + (local_j*ldb + k) / 8)); // input uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); count += __popcll(c_bit64); } #endif const int bit_step = 256; int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val + bias_val; } } } } */ // Coalesced memory access - GOOD void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr, float *bias) { size_t size = M*N; const int num_blocks = size / BLOCK + 1; /* printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n", size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024); printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512); */ //printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda); gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr, bias); } // -------------------------------- // -------------------------------- // -------------------------------- // sequentially - B (input) in the shared_memory - BAD // -------------------------------- __global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { //__shared__ float mean_shared[32]; //__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits //__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits __shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits const int K_items = WARP_SIZE; int start_j = blockIdx.x*blockDim.x / (K_items * M); { int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1; if (end_j > N) end_j = N; size_t shared_size = ldb * (end_j - start_j); if (shared_size != 0) { //if(threadIdx.x == 0) printf(" 
start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size); int k; for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) { int x = start_j*ldb + k; if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8)); } } } __syncthreads(); int index = blockIdx.x*blockDim.x + threadIdx.x; { int i; // l.n int j; // out_h*out_w int k; // l.size * l.size * l.c const int index2 = index / K_items; i = index2 % M; // max M j = index2 / M; // max N int local_j = j - start_j; //if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n", // k, K, K_items, i, j, lda, ldb, ldc); { // l.n - filters [16 - 55 - 1024] // further improvements: for (l.n == 1024) iterate several (j) if (j < N) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; const int bit_step = 32; for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216] uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights //uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32); count += __popc(c_bit32); } for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) count += __shfl_down(count, offset); if (threadIdx.x % WARP_SIZE == 0) { int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; float mean_val = mean_arr[i]; C[i*ldc + j] = (2 * count - K) * mean_val; //B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val; } } } } } // sequentially - BAD void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { //size_t size = M*N; size_t size = M*N * 32; const int num_blocks = size / BLOCK + 1; //printf(" K = %d \n", K); /* printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n", size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024); printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512); */ //printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda); gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >( M, N, K, A, lda, B, ldb, C, ldc, mean_arr); } // --------------------------------
046bc09510b10c9658909e5fd19b0ee860a9f7a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "model.h" __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int cudaTestEntry() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size) , 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
046bc09510b10c9658909e5fd19b0ee860a9f7a0.cu
#include "model.h" __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int cudaTestEntry() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size >>> (dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
ad108f9ce22a3d60ee424477c736193a759f83cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/Pair.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <faiss/gpu/utils/Select.cuh> #include <faiss/gpu/utils/Tensor.cuh> #include <faiss/gpu/utils/StaticUtils.h> namespace faiss { namespace gpu { // L2 + select kernel for k == 1, implements re-use of ||c||^2 template <typename T, int kRowsPerBlock, int kBlockSize> __global__ void l2SelectMin1(Tensor<T, 2, true> productDistances, Tensor<T, 1, true> centroidDistances, Tensor<uint8_t, 1, true> bitset, Tensor<T, 2, true> outDistances, Tensor<int, 2, true> outIndices) { // Each block handles kRowsPerBlock rows of the distances (results) Pair<T, int> threadMin[kRowsPerBlock]; __shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)]; T distance[kRowsPerBlock]; #pragma unroll for (int i = 0; i < kRowsPerBlock; ++i) { threadMin[i].k = Limits<T>::getMax(); threadMin[i].v = -1; } // blockIdx.x: which chunk of rows we are responsible for updating int rowStart = blockIdx.x * kRowsPerBlock; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool bitsetIsEmpty = (bitset.getSize(0) == 0); if (endRow) { if (productDistances.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endRow) { for (int row = rowStart; row < productDistances.getSize(0); ++row) { for (int col = threadIdx.x; col < productDistances.getSize(1); col += blockDim.x) { if (bitsetIsEmpty || (!(bitset[col >> 3] & (0x1 << (col & 0x7))))) { distance[0] = Math<T>::add(centroidDistances[col], 
productDistances[row][col]); } else { distance[0] = (T)(1.0 / 0.0); } if (Math<T>::lt(distance[0], threadMin[0].k)) { threadMin[0].k = distance[0]; threadMin[0].v = col; } } // Reduce within the block threadMin[0] = blockReduceAll<Pair<T, int>, Min<Pair<T, int> >, false, false>( threadMin[0], Min<Pair<T, int> >(), blockMin); if (threadIdx.x == 0) { outDistances[row][0] = threadMin[0].k; outIndices[row][0] = threadMin[0].v; } // so we can use the shared memory again __syncthreads(); threadMin[0].k = Limits<T>::getMax(); threadMin[0].v = -1; } } else { for (int col = threadIdx.x; col < productDistances.getSize(1); col += blockDim.x) { T centroidDistance = centroidDistances[col]; #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { distance[row] = productDistances[rowStart + row][col]; } #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { distance[row] = Math<T>::add(distance[row], centroidDistance); } #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { if (Math<T>::lt(distance[row], threadMin[row].k)) { threadMin[row].k = distance[row]; threadMin[row].v = col; } } } // Reduce within the block blockReduceAll<kRowsPerBlock, Pair<T, int>, Min<Pair<T, int> >, false, false>( threadMin, Min<Pair<T, int> >(), blockMin); if (threadIdx.x == 0) { #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { outDistances[rowStart + row][0] = threadMin[row].k; outIndices[rowStart + row][0] = threadMin[row].v; } } } } // With bitset included // L2 + select kernel for k > 1, no re-use of ||c||^2 template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock> __global__ void l2SelectMinK(Tensor<T, 2, true> productDistances, Tensor<T, 1, true> centroidDistances, Tensor<uint8_t, 1, true> bitset, Tensor<T, 2, true> outDistances, Tensor<int, 2, true> outIndices, int k, T initK) { // Each block handles a single row of the distances (results) constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ T smemK[kNumWarps * NumWarpQ]; 
__shared__ int smemV[kNumWarps * NumWarpQ]; BlockSelect<T, int, false, Comparator<T>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(initK, -1, smemK, smemV, k); int row = blockIdx.x; // Whole warps must participate in the selection int limit = utils::roundDown(productDistances.getSize(1), kWarpSize); int i = threadIdx.x; bool bitsetIsEmpty = (bitset.getSize(0) == 0); T v; for (; i < limit; i += blockDim.x) { if (bitsetIsEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) { v = Math<T>::add(centroidDistances[i], productDistances[row][i]); } else { v = (T)(1.0 / 0.0); } heap.add(v, i); } if (i < productDistances.getSize(1)) { if (bitsetIsEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) { v = Math<T>::add(centroidDistances[i], productDistances[row][i]); } else { v = (T)(1.0 / 0.0); } heap.addThreadQ(v, i); } heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { outDistances[row][i] = smemK[i]; outIndices[row][i] = smemV[i]; } } template <typename T> void runL2SelectMin(Tensor<T, 2, true>& productDistances, Tensor<T, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, hipStream_t stream) { FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0)); FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0)); FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1)); FAISS_ASSERT(outDistances.getSize(1) == k); FAISS_ASSERT(outIndices.getSize(1) == k); FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { constexpr int kThreadsPerBlock = 256; constexpr int kRowsPerBlock = 8; auto block = dim3(kThreadsPerBlock); auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock)); hipLaunchKernelGGL(( l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock>) , dim3(grid), dim3(block), 0, stream, productDistances, centroidDistances, bitset, outDistances, outIndices); } else { auto grid = dim3(outDistances.getSize(0)); #define RUN_L2_SELECT(BLOCK, 
NUM_WARP_Q, NUM_THREAD_Q) \ do { \ hipLaunchKernelGGL(( l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK>) \ , dim3(grid), dim3(BLOCK), 0, stream, productDistances, centroidDistances, \ bitset, outDistances, outIndices, \ k, Limits<T>::getMax()); \ } while (0) // block size 128 for everything <= 1024 if (k <= 32) { RUN_L2_SELECT(128, 32, 2); } else if (k <= 64) { RUN_L2_SELECT(128, 64, 3); } else if (k <= 128) { RUN_L2_SELECT(128, 128, 3); } else if (k <= 256) { RUN_L2_SELECT(128, 256, 4); } else if (k <= 512) { RUN_L2_SELECT(128, 512, 8); } else if (k <= 1024) { RUN_L2_SELECT(128, 1024, 8); #if GPU_MAX_SELECTION_K >= 2048 } else if (k <= 2048) { // smaller block for less shared memory RUN_L2_SELECT(64, 2048, 8); #endif } else { FAISS_ASSERT(false); } } CUDA_TEST_ERROR(); } void runL2SelectMin(Tensor<float, 2, true>& productDistances, Tensor<float, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, hipStream_t stream) { runL2SelectMin<float>(productDistances, centroidDistances, bitset, outDistances, outIndices, k, stream); } void runL2SelectMin(Tensor<half, 2, true>& productDistances, Tensor<half, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, hipStream_t stream) { runL2SelectMin<half>(productDistances, centroidDistances, bitset, outDistances, outIndices, k, stream); } } } // namespace
ad108f9ce22a3d60ee424477c736193a759f83cc.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/Pair.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <faiss/gpu/utils/Select.cuh> #include <faiss/gpu/utils/Tensor.cuh> #include <faiss/gpu/utils/StaticUtils.h> namespace faiss { namespace gpu { // L2 + select kernel for k == 1, implements re-use of ||c||^2 template <typename T, int kRowsPerBlock, int kBlockSize> __global__ void l2SelectMin1(Tensor<T, 2, true> productDistances, Tensor<T, 1, true> centroidDistances, Tensor<uint8_t, 1, true> bitset, Tensor<T, 2, true> outDistances, Tensor<int, 2, true> outIndices) { // Each block handles kRowsPerBlock rows of the distances (results) Pair<T, int> threadMin[kRowsPerBlock]; __shared__ Pair<T, int> blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)]; T distance[kRowsPerBlock]; #pragma unroll for (int i = 0; i < kRowsPerBlock; ++i) { threadMin[i].k = Limits<T>::getMax(); threadMin[i].v = -1; } // blockIdx.x: which chunk of rows we are responsible for updating int rowStart = blockIdx.x * kRowsPerBlock; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool bitsetIsEmpty = (bitset.getSize(0) == 0); if (endRow) { if (productDistances.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endRow) { for (int row = rowStart; row < productDistances.getSize(0); ++row) { for (int col = threadIdx.x; col < productDistances.getSize(1); col += blockDim.x) { if (bitsetIsEmpty || (!(bitset[col >> 3] & (0x1 << (col & 0x7))))) { distance[0] = Math<T>::add(centroidDistances[col], productDistances[row][col]); } else { distance[0] = (T)(1.0 / 0.0); } if 
(Math<T>::lt(distance[0], threadMin[0].k)) { threadMin[0].k = distance[0]; threadMin[0].v = col; } } // Reduce within the block threadMin[0] = blockReduceAll<Pair<T, int>, Min<Pair<T, int> >, false, false>( threadMin[0], Min<Pair<T, int> >(), blockMin); if (threadIdx.x == 0) { outDistances[row][0] = threadMin[0].k; outIndices[row][0] = threadMin[0].v; } // so we can use the shared memory again __syncthreads(); threadMin[0].k = Limits<T>::getMax(); threadMin[0].v = -1; } } else { for (int col = threadIdx.x; col < productDistances.getSize(1); col += blockDim.x) { T centroidDistance = centroidDistances[col]; #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { distance[row] = productDistances[rowStart + row][col]; } #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { distance[row] = Math<T>::add(distance[row], centroidDistance); } #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { if (Math<T>::lt(distance[row], threadMin[row].k)) { threadMin[row].k = distance[row]; threadMin[row].v = col; } } } // Reduce within the block blockReduceAll<kRowsPerBlock, Pair<T, int>, Min<Pair<T, int> >, false, false>( threadMin, Min<Pair<T, int> >(), blockMin); if (threadIdx.x == 0) { #pragma unroll for (int row = 0; row < kRowsPerBlock; ++row) { outDistances[rowStart + row][0] = threadMin[row].k; outIndices[rowStart + row][0] = threadMin[row].v; } } } } // With bitset included // L2 + select kernel for k > 1, no re-use of ||c||^2 template <typename T, int NumWarpQ, int NumThreadQ, int ThreadsPerBlock> __global__ void l2SelectMinK(Tensor<T, 2, true> productDistances, Tensor<T, 1, true> centroidDistances, Tensor<uint8_t, 1, true> bitset, Tensor<T, 2, true> outDistances, Tensor<int, 2, true> outIndices, int k, T initK) { // Each block handles a single row of the distances (results) constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; __shared__ T smemK[kNumWarps * NumWarpQ]; __shared__ int smemV[kNumWarps * NumWarpQ]; BlockSelect<T, int, false, 
Comparator<T>, NumWarpQ, NumThreadQ, ThreadsPerBlock> heap(initK, -1, smemK, smemV, k); int row = blockIdx.x; // Whole warps must participate in the selection int limit = utils::roundDown(productDistances.getSize(1), kWarpSize); int i = threadIdx.x; bool bitsetIsEmpty = (bitset.getSize(0) == 0); T v; for (; i < limit; i += blockDim.x) { if (bitsetIsEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) { v = Math<T>::add(centroidDistances[i], productDistances[row][i]); } else { v = (T)(1.0 / 0.0); } heap.add(v, i); } if (i < productDistances.getSize(1)) { if (bitsetIsEmpty || (!(bitset[i >> 3] & (0x1 << (i & 0x7))))) { v = Math<T>::add(centroidDistances[i], productDistances[row][i]); } else { v = (T)(1.0 / 0.0); } heap.addThreadQ(v, i); } heap.reduce(); for (int i = threadIdx.x; i < k; i += blockDim.x) { outDistances[row][i] = smemK[i]; outIndices[row][i] = smemV[i]; } } template <typename T> void runL2SelectMin(Tensor<T, 2, true>& productDistances, Tensor<T, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, cudaStream_t stream) { FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0)); FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0)); FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1)); FAISS_ASSERT(outDistances.getSize(1) == k); FAISS_ASSERT(outIndices.getSize(1) == k); FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); if (k == 1) { constexpr int kThreadsPerBlock = 256; constexpr int kRowsPerBlock = 8; auto block = dim3(kThreadsPerBlock); auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock)); l2SelectMin1<T, kRowsPerBlock, kThreadsPerBlock> <<<grid, block, 0, stream>>>(productDistances, centroidDistances, bitset, outDistances, outIndices); } else { auto grid = dim3(outDistances.getSize(0)); #define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \ do { \ l2SelectMinK<T, NUM_WARP_Q, NUM_THREAD_Q, BLOCK> \ <<<grid, BLOCK, 0, 
stream>>>(productDistances, centroidDistances, \ bitset, outDistances, outIndices, \ k, Limits<T>::getMax()); \ } while (0) // block size 128 for everything <= 1024 if (k <= 32) { RUN_L2_SELECT(128, 32, 2); } else if (k <= 64) { RUN_L2_SELECT(128, 64, 3); } else if (k <= 128) { RUN_L2_SELECT(128, 128, 3); } else if (k <= 256) { RUN_L2_SELECT(128, 256, 4); } else if (k <= 512) { RUN_L2_SELECT(128, 512, 8); } else if (k <= 1024) { RUN_L2_SELECT(128, 1024, 8); #if GPU_MAX_SELECTION_K >= 2048 } else if (k <= 2048) { // smaller block for less shared memory RUN_L2_SELECT(64, 2048, 8); #endif } else { FAISS_ASSERT(false); } } CUDA_TEST_ERROR(); } void runL2SelectMin(Tensor<float, 2, true>& productDistances, Tensor<float, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, cudaStream_t stream) { runL2SelectMin<float>(productDistances, centroidDistances, bitset, outDistances, outIndices, k, stream); } void runL2SelectMin(Tensor<half, 2, true>& productDistances, Tensor<half, 1, true>& centroidDistances, Tensor<uint8_t, 1, true>& bitset, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, int k, cudaStream_t stream) { runL2SelectMin<half>(productDistances, centroidDistances, bitset, outDistances, outIndices, k, stream); } } } // namespace
499a73a38b8d17fb38cb996c80923f22dba01729.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * The MIT License * * Copyright (c) 1997-2018 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
 */

/* GPU DataWarehouse device & host access */

#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <CCA/Components/Schedulers/GPUMemoryPool.h>
#include <CCA/Components/Schedulers/SchedulerCommon.h>
#include <CCA/Components/Schedulers/UnifiedScheduler.h>
#include <Core/Grid/Variables/GPUVariable.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Grid/Variables/GPUReductionVariable.h>
#include <Core/Grid/Variables/GPUPerPatch.h>
#include <Core/Parallel/MasterLock.h>
#include <Core/Parallel/Parallel.h>
#include <Core/Parallel/ProcessorGroup.h>
#include <Core/Util/DebugStream.h>

#include <sci_defs/cuda_defs.h>

#ifndef __CUDA_ARCH__
#include <string.h>
#include <string>
#endif

#include <map>

// Serializes interleaved output on gpu_stats/cerr across threads.
extern Uintah::MasterLock cerrLock;

namespace Uintah {
  extern DebugStream gpu_stats;
}

//______________________________________________________________________
//
// Look up a grid variable by (label, patchID, matlIndx, levelIndx) and load its
// device offset/size/pointer into 'var'.
// Device compilation path: reads the flat d_varDB via getItem().
// Host compilation path: reads the varPointers map under varLock.
// On a missing entry, reports through printGetError() and leaves 'var' untouched.
HOST_DEVICE void
GPUDataWarehouse::get(const GPUGridVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  //device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setArray3(item->var_offset, item->var_size, item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo vp = varPointers->at(lpml);
    var.setArray3(vp.var->device_offset, vp.var->device_size, vp.var->device_ptr);
  }
  else {
    printf("I'm GPUDW with name: \"%s\" at %p \n", _internalName, this);
    printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Host-only query: returns true iff a staging entry with exactly this
// offset/size exists under the (label, patch, matl, level) entry.
// Calling this from device code only prints an error and returns false.
HOST_DEVICE bool
GPUDataWarehouse::stagingVarExists(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
  // device code
  printError("This method not defined for the device.", "stagingVarExists", label, patchID, matlIndx, levelIndx);
  return false;
#else
  // host code
  varLock->lock();
  bool retval = false;
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    // Staging entries are keyed by (offset, size) within the parent var.
    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv);
    retval = (staging_it != it->second.var->stagingVars.end());
  }
  varLock->unlock();
  return retval;
#endif
}

//______________________________________________________________________
//
// Host-only: load the device pointer of an existing staging entry into 'var'.
// Exits the process if either the parent entry or the staging entry is missing.
HOST_DEVICE void
GPUDataWarehouse::getStagingVar(const GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
  // device code
  printError("This method not defined for the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv);
    if (staging_it != it->second.var->stagingVars.end()) {
      var.setArray3(offset, size, staging_it->second.device_ptr);
    }
    else {
      // NOTE(review): exits while still holding varLock; moot since the
      // process terminates, but inconsistent with the unlock-before-exit
      // style used elsewhere in this file.
      printf("GPUDataWarehouse::getStagingVar() - Didn't find a staging variable from the device for label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d).", label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z);
      exit(-1);
    }
  }
  else {
    printError("Didn't find a staging variable from the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Fetch a level-wide (per-level, not per-patch) grid variable.  Uses the
// sentinel patchID -99999999 — presumably the convention for level-scoped
// entries in this DW; both compilation paths are intentionally identical.
HOST_DEVICE void
GPUDataWarehouse::getLevel(const GPUGridVariableBase& var, char const* label, int8_t matlIndx, int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  get(var, label, -99999999, matlIndx, levelIndx);
#else
  // host code
  get(var, label, -99999999, matlIndx, levelIndx);
#endif
}

//______________________________________________________________________
//
// Look up a reduction variable and load its device pointer into 'var'.
// Same lookup strategy as get(GPUGridVariableBase&, ...) but only the raw
// pointer is transferred (reductions carry no offset/size).
HOST_DEVICE void
GPUDataWarehouse::get(const GPUReductionVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setData(item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo vp = varPointers->at(lpml);
    var.setData(vp.var->device_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Look up a per-patch variable and load its device pointer into 'var'.
HOST_DEVICE void
GPUDataWarehouse::get(const GPUPerPatchBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setData(item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo vp = varPointers->at(lpml);
    var.setData(vp.var->device_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Non-const lookup of a grid variable (same data flow as get(); the
// distinction from get() is only in the caller's intent — no write-marking
// happens here in the visible code).
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setArray3(item->var_offset, item->var_size, item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr);
  }
  else {
    // NOTE(review): error label says "get" rather than "getModifiable" —
    // looks copy-pasted; left as-is since it is a runtime string.
    printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Non-const lookup of a reduction variable.
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setData(item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo vp = varPointers->at(lpml);
    var.setData(vp.var->device_ptr);
  }
  else {
    // NOTE(review): error label says "get" rather than "getModifiable".
    printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//
// Non-const lookup of a per-patch variable.
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx)
{
#ifdef __CUDA_ARCH__
  // device code
  GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
  if (item) {
    var.setData(item->var_ptr);
  }
  else {
    printGetError("GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
#else
  // host code
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo vp = varPointers->at(lpml);
    var.setData(vp.var->device_ptr);
  }
  else {
    // NOTE(review): error label says "get" rather than "getModifiable".
    printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
  }
  varLock->unlock();
#endif
}

//______________________________________________________________________
//This method assumes the base patch in a superpatch region has already been allocated.
//This is a shallow copy.  It copies all datawarehouse metadata entries (except the status)
//from that item into this patch's item in the GPU DW.
__host__ void
GPUDataWarehouse::copySuperPatchInfo(char const* label, int superPatchBaseID, int superPatchDestinationID, int matlIndx, int levelIndx)
{
  if (superPatchBaseID == superPatchDestinationID) {
    //don't handle shallow copying itself
    return;
  }

  //Possible TODO: Add in offsets so the variable could be accessed in a non-superpatch manner.
  labelPatchMatlLevel lpml_source(label, superPatchBaseID, matlIndx, levelIndx);
  labelPatchMatlLevel lpml_dest(label, superPatchDestinationID, matlIndx, levelIndx);

  varLock->lock();
  // Both the base (source) and destination entries must already exist; the
  // copy is rejected (process exit) otherwise.
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator source_iter = varPointers->find(lpml_source);
  if (source_iter != varPointers->end()) {
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator dest_iter = varPointers->find(lpml_dest);
    if (dest_iter != varPointers->end()) {

      if (gpu_stats.active()) {
        cerrLock.lock();
        {
          gpu_stats << UnifiedScheduler::myRankThread()
                    << " GPUDataWarehouse::copySuperPatchInfo() - "
                    << " label " << label
                    << " matl " << matlIndx
                    << " level " << levelIndx
                    << " Forming a superpatch by merging/shallowcopying metadata for patch " << superPatchDestinationID
                    << " into patch " << superPatchBaseID
                    << " with source status codes " << getDisplayableStatusCodes(source_iter->second.var->atomicStatusInGpuMemory)
                    << " and dest status codes " << getDisplayableStatusCodes(dest_iter->second.var->atomicStatusInGpuMemory)
                    << " on device " << d_device_id
                    << " into GPUDW at " << std::hex << this << std::dec
                    << " with description " << _internalName
                    << std::endl;
        }
        cerrLock.unlock();
      }

      //They now share the variable.  The magic of this happens because the var is a C++ shared_ptr
      //TODO: They don't share the same offset.  When offsets are added in, this should be updated
      //to manage offsets.
      dest_iter->second.var = source_iter->second.var;

    } else {
      printf("ERROR: GPUDataWarehouse::copySuperPatchInfo() - Didn't find a the destination ID at %d to copy into label %s patch %d matl %d level %d\n", superPatchDestinationID, label, superPatchDestinationID, matlIndx, levelIndx);
      varLock->unlock();
      exit(-1);
    }
  } else {
    printf("ERROR: GPUDataWarehouse::copySuperPatchInfo() - Didn't find a base superPatch ID at %d to copy into label %s patch %d matl %d level %d\n", superPatchBaseID, label, superPatchDestinationID, matlIndx, levelIndx);
    varLock->unlock();
    exit(-1);
  }
  varLock->unlock();
}

//______________________________________________________________________
//
// Record the device pointer/extents of an already-allocated grid variable into
// the host-side varPointers map (and, for staging, into the matching
// stagingVars entry).  Precondition: an entry (or staging sub-entry) must
// already exist — put() refuses to create entries, see
// putUnallocatedIfNotExists() for that.
__host__ void
GPUDataWarehouse::put(GPUGridVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, GhostType gtype, int numGhostCells, void* host_ptr)
{
  varLock->lock();

  int3 var_offset;        // offset
  int3 var_size;          // dimensions of GPUGridVariable
  void* var_ptr;          // raw pointer to the memory
  var.getArray3(var_offset, var_size, var_ptr);

  // See if it already exists.  Also see if we need to update this into d_varDB.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
  std::map<stagingVar, stagingVarInfo>::iterator staging_it;

  //sanity checks
  if (iter == varPointers->end()) {
    printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
    exit(-1);
  } else if (staging) {
    // The staging sub-entry is keyed by exact (offset, size) and must also
    // pre-exist.
    stagingVar sv;
    sv.device_offset = var_offset;
    sv.device_size = var_size;
    staging_it = iter->second.var->stagingVars.find(sv);
    if (staging_it == iter->second.var->stagingVars.end()) {
      printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without this staging var first existing in the internal database.\n");
      exit(-1);
    }
  }

  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::put( " << label << " ) - "
                << " Attempting to put a variable in the host-side varPointers map for label " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx;
      if (staging) {
        gpu_stats << " staging: true";
      } else {
        gpu_stats << " staging: false";
      }
      gpu_stats << " at device address " << var_ptr
                << " with status codes ";
      if (!staging) {
        gpu_stats << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory);
      } else {
        gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
      }
      gpu_stats << " datatype size " << sizeOfDataType
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName
                << " current varPointers size is: " << varPointers->size()
                << " low (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ") "
                << std::endl;
    }
    cerrLock.unlock();
  }

  if (staging == false) {

    // Non-staging: overwrite the entry's device-side metadata in place.
    // varDB_index -1 means this entry has no slot in the flat d_varDB array.
    iter->second.varDB_index = -1;
    iter->second.var->device_ptr = var_ptr;
    iter->second.var->device_offset = var_offset;
    iter->second.var->device_size = var_size;
    iter->second.var->sizeOfDataType = sizeOfDataType;
    iter->second.var->gtype = gtype;
    iter->second.var->numGhostCells = numGhostCells;
    iter->second.var->host_contiguousArrayPtr = host_ptr;
    iter->second.var->atomicStatusInHostMemory = UNKNOWN;

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::put( " << label << " ) - "
                  << " Put a regular non-staging variable in the host-side varPointers map for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " at device address " << var_ptr
                  << " with datatype size " << iter->second.var->sizeOfDataType
                  << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory)
                  << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec
                  << " with description " << _internalName
                  << " current varPointers size is: " << varPointers->size()
                  << std::endl;
      }
      cerrLock.unlock();
    }

  } else { // if (staging == true)

    staging_it->second.device_ptr = var_ptr;
    staging_it->second.host_contiguousArrayPtr = host_ptr;
    staging_it->second.varDB_index = -1;
    staging_it->second.atomicStatusInHostMemory = UNKNOWN;

    // Update the non-staging var's sizeOfDataType.  The staging var uses this number.
    // It's possible that a staging var can exist and an empty placeholder non-staging var also exist,
    // if so, then then empty placeholder non-staging var won't have correct data type size.
    // So we grab it here.
    iter->second.var->sizeOfDataType = sizeOfDataType;

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::put( " << label << " ) - "
                  << " Put a staging variable in the host-side varPointers map for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " with offset (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ")"
                  << " and size (" << var_size.x << ", " << var_size.y << ", " << var_size.z << ")"
                  << " at device address " << var_ptr
                  << " with datatype size " << iter->second.var->sizeOfDataType
                  << " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
                  << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec
                  << std::endl;
      }
      cerrLock.unlock();
    }
  }

  varLock->unlock();
}

//______________________________________________________________________
// This method puts an empty placeholder entry into the GPUDW database and marks it as unallocated
__host__ void
GPUDataWarehouse::putUnallocatedIfNotExists(char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 offset, int3 size)
{
  // If it's a normal non-staging variable, check if doesn't exist.  If so, add an "unallocated" entry.
  // If it's a staging variable, then still check if the non-staging part exists.  A staging must exist within a non-staging variable.
  // A scenario where this can get a staging variable without a non-staging variable is receiving data from neighbor nodes.
  // For example, suppose node A has patch 0, and node B has patch 1, and A's patch 0 needs ghost cells from B's patch 1.  Node A will
  // receive those ghost cells, but they will be marked as belonging to patch 1.  Since A doesn't have the regular non-staging var
  // for patch 1, we make an empty placeholder for patch 1 so A can have a staging var to hold the ghost cell for patch 1.
  varLock->lock();  //Lock this entire section

  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if ( it == varPointers->end()) {
    // Do not place size information.  The Data Warehouse should not declare its current size until after the allocation is complete.
    // Further, no scheduler thread should attempt to determine an entry's size until the allocated flag has been marked as true.
    allVarPointersInfo vp;
    vp.varDB_index = -1;
    vp.var->device_ptr = nullptr;
    vp.var->atomicStatusInHostMemory = UNKNOWN;
    vp.var->atomicStatusInGpuMemory = UNALLOCATED;
    vp.var->host_contiguousArrayPtr = nullptr;
    vp.var->sizeOfDataType = 0;

    std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret
                  = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
    if (!ret.second) {
      printf("ERROR:\nGPUDataWarehouse::putUnallocatedIfNotExists( ) Failure inserting into varPointers map.\n");
      varLock->unlock();
      exit(-1);
    }
    it = ret.first;

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
                  << " Put an unallocated non-staging variable in the host-side varPointers map for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec
                  << " with description " << _internalName
                  << std::endl;
      }
      cerrLock.unlock();
    }
  }

  if (staging) {
    // The parent entry now exists either way; add the staging sub-entry for
    // this exact (offset, size) if it is not already present.
    std::map<stagingVar, stagingVarInfo>::iterator staging_it;

    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    staging_it = it->second.var->stagingVars.find(sv);
    if (staging_it == it->second.var->stagingVars.end()){
      stagingVarInfo svi;
      svi.varDB_index = -1;
      svi.device_ptr = nullptr;
      svi.host_contiguousArrayPtr = nullptr;
      svi.atomicStatusInHostMemory = UNKNOWN;
      svi.atomicStatusInGpuMemory = UNALLOCATED;

      std::pair<stagingVar, stagingVarInfo> p = std::make_pair( sv, svi );
      it->second.var->stagingVars.insert( p );

      if (gpu_stats.active()) {
        cerrLock.lock();
        {
          gpu_stats << UnifiedScheduler::myRankThread()
                    << " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
                    << " Put an unallocated staging variable in the host-side varPointers map for label " << label
                    << " patch " << patchID
                    << " matl " << matlIndx
                    << " level " << levelIndx
                    << " offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                    << " size (" << size.x << ", " << size.y << ", " << size.z << ")"
                    << " on device " << d_device_id
                    << " into GPUDW at " << std::hex << this << std::dec
                    << " with description " << _internalName
                    << std::endl;
        }
        cerrLock.unlock();
      }
    }
  }
  varLock->unlock();
}

//______________________________________________________________________
//
// Claim/allocate GPU space for a grid variable spanning [low, high) and load
// the resulting offset/size/pointer into 'var'.  Races between scheduler
// threads are resolved with a compare-and-swap on the entry's
// atomicStatusInGpuMemory: exactly one thread wins "allocating"; losers spin
// until "allocated" and reuse the winner's pointer.
__host__ void
GPUDataWarehouse::allocateAndPut(GPUGridVariableBase &var, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GhostType gtype, int numGhostCells)
{

  // Allocate space on the GPU and declare a variable onto the GPU.
  // Check if it exists prior to allocating memory for it.
  // If it has already been allocated, just use that.
  // If it hasn't, this is lock free and the first thread to request allocating gets to allocate
  // If another thread sees that allocating is in process, it loops and waits until the allocation complete.
  bool allocationNeeded = false;
  int3 size = make_int3(high.x-low.x, high.y-low.y, high.z-low.z);
  int3 offset = low;

  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " Calling putUnallocatedIfNotExists() for " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " staging: " << std::boolalpha << staging
                << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName
                << std::endl;
    }
    cerrLock.unlock();
  }

  // This variable may not yet exist.  But we want to declare we're allocating it.  So ensure there is an entry.
  putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, staging, offset, size);

  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  std::map<stagingVar, stagingVarInfo>::iterator staging_it;
  if (staging) {
    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    staging_it = it->second.var->stagingVars.find(sv);
  }
  varLock->unlock();

  // Locking not needed from here on in this method.  STL maps ensure that iterators point to correct values
  // even if other threads add nodes.  We just can't remove values, but that shouldn't ever happen.

  // This prepares the var with the offset and size.  Any possible allocation will come later.
  // If it needs to go into the database, that will also come later
  void* addr = nullptr;
  var.setArray3(offset, size, addr);

  // Now see if we allocate the variable or use a previous existing allocation.
  if (staging == false) {

    // See if someone has stated they are allocating it
    allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory);
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                  << " allocationNeeded is " << std::boolalpha << allocationNeeded
                  << " for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                  << std::endl;
      }
      cerrLock.unlock();
    }
    if (!allocationNeeded) {
      // Someone else is allocating it or it has already been allocated.  Wait until they are done.
      // NOTE(review): busy-wait spin with no backoff — presumably allocations
      // complete quickly enough that this is acceptable; confirm.
      bool allocated = false;
      while (!allocated) {
        allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory);
      }
      // Sanity check to ensure we have correct size information.
      varLock->lock();
      it = varPointers->find(lpml);
      varLock->unlock();

      if (it->second.var->device_offset.x == low.x
          && it->second.var->device_offset.y == low.y
          && it->second.var->device_offset.z == low.z
          && it->second.var->device_size.x == size.x
          && it->second.var->device_size.y == size.y
          && it->second.var->device_size.z == size.z) {

        // Space for this var already exists.  Use that and return.
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This non-staging/regular variable already exists.  No need to allocate another.  GPUDW has a variable for label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << it->second.var->device_ptr
                      << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec
                      << std::endl;
          }
          cerrLock.unlock();
        }

        // Have this var use the existing memory address.
        var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr);

      } else if (it->second.var->device_offset.x <= low.x
                 && it->second.var->device_offset.y <= low.y
                 && it->second.var->device_offset.z <= low.z
                 && it->second.var->device_size.x >= size.x
                 && it->second.var->device_size.y >= size.y
                 && it->second.var->device_size.z >= size.z) {
        //It fits inside.  Just use it.
        // NOTE(review): this only checks lower corner <= low and size >= size;
        // it does not verify the upper corner (offset+size) actually covers
        // low+size, so a large-but-shifted allocation could pass.  Presumably
        // superpatch layouts guarantee containment — confirm.
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This non-staging/regular variable fits inside another variable that already exists.  No need to allocate another.  GPUDW has a variable for label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << it->second.var->device_ptr
                      << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec
                      << std::endl;
          }
          cerrLock.unlock();
        }

        var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr);

      } else {
        printf("ERROR:\nGPUDataWarehouse::allocateAndPut( %s ) Variable in database but of the wrong size.  This shouldn't ever happen. This needs low (%d, %d, %d) and size (%d, %d, %d), but in the database it is low (%d, %d, %d) and size (%d, %d, %d)\n",
               label, low.x, low.y, low.z, size.x, size.y, size.z,
               it->second.var->device_offset.x, it->second.var->device_offset.y, it->second.var->device_offset.z,
               it->second.var->device_size.x, it->second.var->device_size.y, it->second.var->device_size.z);
        exit(-1);
      }
    }
  } else {

    // it's a staging variable
    if (staging_it != it->second.var->stagingVars.end()) {
      // NOTE(review): if the staging entry was NOT found, this branch silently
      // does nothing and allocationNeeded stays false — no allocation, no
      // error.  Presumably putUnallocatedIfNotExists() above guarantees the
      // entry exists; confirm.

      // This variable exists in the database, no need to "put" it in again.
      // See if someone has stated they are allocating it
      allocationNeeded = compareAndSwapAllocating(staging_it->second.atomicStatusInGpuMemory);

      if (!allocationNeeded) {
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This staging variable already exists.  No need to allocate another.  For label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << staging_it->second.device_ptr
                      << " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec
                      << std::endl;
          }
          cerrLock.unlock();
        }

        // We need the pointer.  We can't move on until we get the pointer.
        // Ensure that it has been allocated (just not allocating).  Another thread may have been assigned to allocate it
        // but not completed that action.  If that's the case, wait until it's done so we can get the pointer.
        bool allocated = false;
        while (!allocated) {
          allocated = checkAllocated(staging_it->second.atomicStatusInGpuMemory);
        }
        //Have this var use the existing memory address.
        var.setArray3(offset, size, staging_it->second.device_ptr);
      }
    }
  }

  //Now allocate it
  if (allocationNeeded) {

    OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);

    unsigned int memSize = var.getMemSize();

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
                  << " for " << label
                  << " patch " << patchID
                  << " material " <<  matlIndx
                  << " level " << levelIndx
                  << " staging: " << std::boolalpha << staging
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " at " << addr
                  << " with status codes ";
        if (!staging) {
          gpu_stats << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory);
        } else {
          gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
        }
        gpu_stats << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec
                  << std::endl;
      }
      cerrLock.unlock();
    }

    addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);

    // Also update the var object itself
    var.setArray3(offset, size, addr);

    // Put all remaining information about the variable into the the database.
    put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, gtype, numGhostCells);

    // Now that we have the pointer and that it has been inserted into the database,
    // Update the status from allocating to allocated
    if (!staging) {
      compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory);
    } else {
      compareAndSwapAllocate(staging_it->second.atomicStatusInGpuMemory);
    }
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut(), complete"
                  << " for " << label
                  << " patch " << patchID
                  << " material " <<  matlIndx
                  << " level " << levelIndx
                  << " staging: " << std::boolalpha << staging
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " at " << addr
                  << " with status codes ";
        if (!staging) {
          gpu_stats << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory);
        } else {
          gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
        }
        gpu_stats << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec
                  << std::endl;
      }
      cerrLock.unlock();
    }
  }
}

//______________________________________________________________________
// This method is meant to take an entry from the host side DW and copy it into
// the task datawarehouse whose job is to eventually live GPU side.
__host__ void GPUDataWarehouse::copyItemIntoTaskDW(GPUDataWarehouse *hostSideGPUDW, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 offset, int3 size) { if (d_device_copy == nullptr) { // sanity check printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This method should only be called from a task data warehouse.\n"); exit(-1); } varLock->lock(); if (d_numVarDBItems==MAX_VARDB_ITEMS) { printf("ERROR: Out of GPUDataWarehouse space"); varLock->unlock(); exit(-1); } varLock->unlock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); stagingVar sv; sv.device_offset = offset; sv.device_size = size; // Get the iterator(s) from the host side GPUDW. hostSideGPUDW->varLock->lock(); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator hostSideGPUDW_iter = hostSideGPUDW->varPointers->find(lpml); std::map<stagingVar, stagingVarInfo>::iterator hostSideGPUDW_staging_iter; if (staging) { hostSideGPUDW_staging_iter = hostSideGPUDW_iter->second.var->stagingVars.find(sv); if (hostSideGPUDW_staging_iter == hostSideGPUDW_iter->second.var->stagingVars.end()) { printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - No staging var was found for for %s patch %d material %d level %d offset (%d, %d, %d) size (%d, %d, %d) in the DW located at %p\n", label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z, hostSideGPUDW); varLock->unlock(); exit(-1); } } hostSideGPUDW->varLock->unlock(); varLock->lock(); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml); //sanity check if (iter != varPointers->end() && !staging) { printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This task datawarehouse already had an entry for %s patch %d material %d level %d\n", label, patchID, matlIndx, levelIndx); varLock->unlock(); exit(-1); } // If it's staging, there should already be a non-staging var in the host-side GPUDW (even if it's just a placeholder) // Inserting into 
this task DW, it is a requirement that non-staging variables get inserted first // then any staging variables can come in later. This won't handle any scenario where a staging variable is requested // into the task DW without a non-staging variable already existing here. //TODO: Replace with an atomic counter. int d_varDB_index=d_numVarDBItems; d_numVarDBItems++; int i = d_varDB_index; if (!staging) { // Create a new allVarPointersInfo object, copying over the offset. allVarPointersInfo vp; vp.device_offset = hostSideGPUDW_iter->second.device_offset; // Give it a d_varDB index vp.varDB_index = d_varDB_index; // insert it in varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) ); strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH); d_varDB[i].domainID = patchID; d_varDB[i].matlIndx = matlIndx; d_varDB[i].levelIndx = levelIndx; d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.var->sizeOfDataType; d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.var->gtype; d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.var->numGhostCells; d_varDB[i].varItem.staging = staging; d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell. d_varDB[i].var_offset = hostSideGPUDW_iter->second.var->device_offset; d_varDB[i].var_size = hostSideGPUDW_iter->second.var->device_size; d_varDB[i].var_ptr = hostSideGPUDW_iter->second.var->device_ptr; } else { if (iter == varPointers->end()) { // A staging item was requested but there's no regular variable for it to piggy back in. // So create an empty placeholder regular variable. // Create a new allVarPointersInfo object, copying over the offset. allVarPointersInfo vp; vp.device_offset = hostSideGPUDW_iter->second.device_offset; // Empty placeholders won't be placed in the d_varDB array. 
vp.varDB_index = -1; // insert it in std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) ); if (!ret.second) { printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) Failure inserting into varPointers map.\n"); varLock->unlock(); exit(-1); } iter = ret.first; } //copy the item stagingVarInfo svi = hostSideGPUDW_staging_iter->second; //Give it a d_varDB index svi.varDB_index = d_varDB_index; //insert it in std::map<stagingVar, stagingVarInfo>::iterator staging_iter = iter->second.var->stagingVars.find(sv); if (staging_iter != iter->second.var->stagingVars.end()) { printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) This staging var already exists in this task DW\n"); } std::pair<stagingVar, stagingVarInfo> p = std::make_pair( sv, svi ); iter->second.var->stagingVars.insert( p ); strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH); d_varDB[i].domainID = patchID; d_varDB[i].matlIndx = matlIndx; d_varDB[i].levelIndx = levelIndx; d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.var->sizeOfDataType; d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.var->gtype; d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.var->numGhostCells; d_varDB[i].varItem.staging = staging; d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell. 
d_varDB[i].var_offset = hostSideGPUDW_staging_iter->first.device_offset; d_varDB[i].var_size = hostSideGPUDW_staging_iter->first.device_size; d_varDB[i].var_ptr = hostSideGPUDW_staging_iter->second.device_ptr; } d_dirty=true; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::copyItemIntoTaskDW( " << label << " ) - " << " Put into d_varDB at index " << i << " of max index " << d_maxdVarDBItems - 1 << " label " << label << " patch " << d_varDB[i].domainID << " matl " << matlIndx << " level " << levelIndx << " staging: " << std::boolalpha << staging << " datatype size " <<d_varDB[i].sizeOfDataType << " into address " << d_varDB[i].var_ptr << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " size [" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << "]" << " offset [" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << "]" << std::endl; } cerrLock.unlock(); } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::putContiguous(GPUGridVariableBase &var, const char* indexID, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GridVariableBase* gridVar, bool stageOnHost) { /* #ifdef __CUDA_ARCH__ //Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc() #else varLock->lock(); //first check if this patch/var/matl is in the process of loading in. labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { //Space for this patch already exists. Use that and return. if (d_debug){ printf("GPUDataWarehouse::putContiguous( %s ). This gpudw database has a variable for label %s patch %d matl %d level %d staging %s on device %d. 
Reusing it.\n", label, label, patchID, matlIndx, levelIndx, staging ? "true" : "false", d_device_id); } var.setArray3(varPointers->at(lpml).device_offset, varPointers->at(lpml).device_size, varPointers->at(lpml).device_ptr); varLock->unlock(); return; } int3 size=make_int3(high.x-low.x, high.y-low.y, high.z-low.z); int3 offset=low; void* device_ptr=nullptr; var.setArray3(offset, size, device_ptr); allocateLock->lock(); contiguousArrayInfo *ca = &(contiguousArrays->at(indexID)); allocateLock->unlock(); if ( (ca->allocatedDeviceMemory == nullptr || ca->sizeOfAllocatedMemory - ca->assignedOffset < var.getMemSize()) && stageOnHost) { printf("ERROR: No room left on device to be assigned address space\n"); if (ca->allocatedDeviceMemory != nullptr) { printf("There was %lu bytes allocated, %lu has been assigned, and %lu more bytes were attempted to be assigned for %s patch %d matl %d level %d staging %s\n", ca->sizeOfAllocatedMemory, ca->assignedOffset, var.getMemSize(), label, patchID, matlIndx, levelIndx, staging ? "true" : "false"); } varLock->unlock(); exit(-1); } else { //There is already pre-allocated contiguous memory chunks with room available on //both the device and the host. Just assign pointers for both the device and host contiguous arrays. //This prepares the var with the offset and size. The actual address will come next. void* host_contiguousArrayPtr = nullptr; int varMemSize = var.getMemSize(); device_ptr = (void*)((uint8_t*)ca->allocatedDeviceMemory + ca->assignedOffset); var.setArray3(offset, size, device_ptr); host_contiguousArrayPtr = (void*)((uint8_t*)ca->allocatedHostMemory + ca->assignedOffset); //We ran into cuda misaligned errors previously when mixing different data types. We suspect the ints at 4 bytes //were the issue. So the engine previously computes buffer room for each variable as a multiple of UnifiedScheduler::bufferPadding. //So the contiguous array has been sized with extra padding. 
(For example, if a var holds 12 ints, then it would be 48 bytes in //size. But if UnifiedScheduler::bufferPadding = 32, then it should add 16 bytes for padding, for a total of 64 bytes). int memSizePlusPadding = ((UnifiedScheduler::bufferPadding - varMemSize % UnifiedScheduler::bufferPadding) % UnifiedScheduler::bufferPadding) + varMemSize; ca->assignedOffset += memSizePlusPadding; if (stageOnHost) { //Some GPU grid variable data doesn't need to be copied from the host //For example, computes vars are just uninitialized space. //Others grid vars need to be copied. This copies the data into a contiguous //array on the host so that copyDataHostToDevice() can copy the contiguous //host array to the device. //Data listed as required. Or compute data that was initialized as a copy of something else. ca->copiedOffset += memSizePlusPadding; memcpy(host_contiguousArrayPtr, gridVar->getBasePointer(), varMemSize); } varLock->unlock(); put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, None, 0, host_contiguousArrayPtr); } #endif */ } //______________________________________________________________________ // __host__ void GPUDataWarehouse::allocate(const char* indexID, size_t size) { /* #ifdef __CUDA_ARCH__ // Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc() #else if (size == 0) { return; } //This method allocates one big chunk of memory so that little allocations do not have to occur for each grid variable. //This is needed because devices often have substantial overhead for each device malloc and device copy. By putting it into one //chunk of memory, only one malloc and one copy to device should be needed. 
double *d_ptr = nullptr; double *h_ptr = nullptr; OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id); printf("Allocated GPU buffer of size %lu \n", (unsigned long)size); CUDA_RT_SAFE_CALL(hipMalloc(&d_ptr, size) ); //printf("In allocate(), cuda malloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id); if (d_debug) { printf("In allocate(), hipMalloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id); } //Now allocate that much also on the host. We want to do this because it's easier to pool up all the data on the host side //and then move it over to the device side later in one shot. It also allows for one copy doing a device to host later. //h_ptr = new double[size]; h_ptr = (double*)malloc(size); //Registering memory seems good in theory, but bad in practice for our purposes. //On the k20 device on beast.sci.utah.edu, this single register call was taking 0.1 seconds! //On my home GTX580 device, it was taking 0.015 seconds, better, but still substantial enough //we should avoid it for now. (If you want to use it, then also uncomment the hipHostUnregister call in clear()). 
//hipHostRegister(h_ptr, size, hipHostRegisterPortable); contiguousArrayInfo ca(d_ptr, h_ptr, size); allocateLock->lock(); contiguousArrays->insert( std::map<const char *, contiguousArrayInfo>::value_type( indexID, ca ) ); //for (std::map<std::string, contiguousArrayInfo>::iterator it = contiguousArrays->begin(); it != contiguousArrays->end(); ++it) // printf("%s\n", it->first.c_str()); allocateLock->unlock(); #endif */ } //______________________________________________________________________ // __host__ void GPUDataWarehouse::copyHostContiguousToHost(GPUGridVariableBase& device_var, GridVariableBase* host_var, char const* label, int patchID, int matlIndx, int levelIndx) { /* #ifdef __CUDA_ARCH__ //Should not called from device side as all memory allocation should be done on CPU side through CUDAMalloc() #else //see if this datawarehouse has anything for this patchGroupID. varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo info = varPointers->at(lpml); device_var.setArray3(varPointers->at(lpml).device_offset, varPointers->at(lpml).device_offset, info.device_ptr); varLock->unlock(); // size_t size = device_var.getMemSize(); //TODO: Instead of doing a memcpy, I bet the original host grid variable could just have its pointers updated //to work with what we were sent back. This would take some considerable work though to get all the details right //TODO: This needs to be a memcpy async memcpy(host_var->getBasePointer(), info.host_contiguousArrayPtr, device_var.getMemSize()); //Since we've moved it back into the host, lets mark it as being used. //It's possible in the future there could be a scenario where we want to bring it //back to the host but still retain it in the GPU. One scenario is //sending data to an output .ups file but not modifying it on the host. 
remove(label, patchID, matlIndx, levelIndx); } else { varLock->unlock(); printf("ERROR: host copyHostContiguoustoHost unknown variable on GPUDataWarehouse"); //for (std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it=varPointers->begin(); it!=varPointers->end(); ++it) // printf("%s %d %d => %d \n", it->first.label, it->first.patchID, it->first.matlIndx, it->second.varDB_index); exit(-1); } #endif */ } //______________________________________________________________________ // __host__ void GPUDataWarehouse::put(GPUReductionVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr) { varLock->lock(); void* var_ptr; // raw pointer to the memory var.getData(var_ptr); //See if it already exists. Also see if we need to update this into d_varDB. labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml); //sanity check if (iter == varPointers->end()) { printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n"); exit(-1); } iter->second.varDB_index = -1; iter->second.var->device_ptr = var_ptr; iter->second.var->sizeOfDataType = sizeOfDataType; iter->second.var->gtype = None; iter->second.var->numGhostCells = 0; iter->second.var->host_contiguousArrayPtr = host_ptr; iter->second.var->atomicStatusInHostMemory = UNKNOWN; int3 zeroValue; zeroValue.x = 0; zeroValue.y = 0; zeroValue.z = 0; iter->second.var->device_offset = zeroValue; iter->second.var->device_size = zeroValue; //previously set, do not set here //iter->second.var->atomicStatusInGpuMemory = if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::put( " << label << " ) - " << " Put a reduction variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << 
levelIndx << " at device address " << var_ptr << " with datatype size " << iter->second.var->sizeOfDataType << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << " current varPointers size is: " << varPointers->size() << std::endl; } cerrLock.unlock(); } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::put(GPUPerPatchBase& var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr) { varLock->lock(); void* var_ptr; // raw pointer to the memory var.getData(var_ptr); //See if it already exists. Also see if we need to update this into d_varDB. labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml); //sanity check if (iter == varPointers->end()) { printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database for %s patch %d matl %d.\n", label, patchID, matlIndx); exit(-1); } iter->second.varDB_index = -1; iter->second.var->device_ptr = var_ptr; iter->second.var->sizeOfDataType = sizeOfDataType; iter->second.var->gtype = None; iter->second.var->numGhostCells = 0; iter->second.var->host_contiguousArrayPtr = host_ptr; iter->second.var->atomicStatusInHostMemory = UNKNOWN; int3 zeroValue; zeroValue.x = 0; zeroValue.y = 0; zeroValue.z = 0; iter->second.var->device_offset = zeroValue; iter->second.var->device_size = zeroValue; //previously set, do not set here //iter->second.atomicStatusInGputMemory = if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::put( " << label << " ) - " << " Put a patch variable in the host-side varPointers map for label " << label 
<< " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " at device address " << var_ptr << " with datatype size " << iter->second.var->sizeOfDataType << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << " current varPointers size is: " << varPointers->size() << std::endl; } cerrLock.unlock(); } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::allocateAndPut(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType) { //Allocate space on the GPU and declare a variable onto the GPU. //This method does NOT stage everything in a big array. //Check if it exists prior to allocating memory for it. //If it has already been allocated, just use that. //If it hasn't, this is lock free and the first thread to request allocating gets to allocate //If another thread sees that allocating is in process, it loops and waits until the allocation complete. bool allocationNeeded = false; int3 size = make_int3(0,0,0); int3 offset = make_int3(0,0,0); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } //This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry. 
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size); varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); varLock->unlock(); void* addr = nullptr; //Now see if we allocate the variable or use a previous existing allocation. //See if someone has stated they are allocating it allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory); if (!allocationNeeded) { //Someone else is allocating it or it has already been allocated. //Space for this var already exists. Use that and return. if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut( " << label << " ) - " << " This reduction variable already exists. No need to allocate another. GPUDW has a variable for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " with data pointer " << it->second.var->device_ptr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } //We need the pointer. We can't move on until we get the pointer. //Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it //but not completed that action. If that's the case, wait until it's done so we can get the pointer. bool allocated = false; while (!allocated) { allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory); addr = it->second.var->device_ptr; } //Have this var use the existing memory address. var.setData(addr); } else { //We are the first task to request allocation. Do it. 
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id); size_t memSize = var.getMemSize(); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool" << " for reduction variable " << label << " patch " << patchID << " material " << matlIndx << " level " << levelIndx << " size " << var.getMemSize() << " at " << addr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize); //Also update the var object itself var.setData(addr); //Put all remaining information about the variable into the the database. put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx); //Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory); } } //______________________________________________________________________ // __host__ void GPUDataWarehouse::allocateAndPut(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType) { //Allocate space on the GPU and declare a variable onto the GPU. //This method does NOT stage everything in a big array. //Check if it exists prior to allocating memory for it. //If it has already been allocated, just use that. //If it hasn't, this is lock free and the first thread to request allocating gets to allocate //If another thread sees that allocating is in process, it loops and waits until the allocation complete. 
bool allocationNeeded = false; int3 size = make_int3(0,0,0); int3 offset = make_int3(0,0,0); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } //This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry. putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size); varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); varLock->unlock(); void* addr = nullptr; //Now see if we allocate the variable or use a previous existing allocation. //See if someone has stated they are allocating it allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory); if (!allocationNeeded) { //Someone else is allocating it or it has already been allocated. //Space for this var already exists. Use that and return. if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut( " << label << " ) - " << " This patch variable already exists. No need to allocate another. GPUDW has a variable for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " with data pointer " << it->second.var->device_ptr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } //We need the pointer. We can't move on until we get the pointer. //Ensure that it has been allocated (just not allocating). 
Another thread may have been assigned to allocate it //but not completed that action. If that's the case, wait until it's done so we can get the pointer. bool allocated = false; while (!allocated) { allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory); addr = it->second.var->device_ptr; } //Have this var use the existing memory address. var.setData(addr); } else { //We are the first task to request allocation. Do it. OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id); size_t memSize = var.getMemSize(); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool" << " for PerPatch variable " << label << " patch " << patchID << " material " << matlIndx << " level " << levelIndx << " size " << var.getMemSize() << " at " << addr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize); //Also update the var object itself var.setData(addr); //Put all remaining information about the variable into the the database. put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx); //Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory); } } //______________________________________________________________________ // __device__ GPUDataWarehouse::dataItem* GPUDataWarehouse::getItem(char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { //This upcoming __syncthreads is needed. With CUDA function calls are inlined. 
// If you don't have it this upcoming __syncthreads here's what I think can happen:
// * The correct index was found by one of the threads.
// * The last __syncthreads is called, all threads met up there.
// * Some threads in the block then make a second "function" call and reset index to -1
// * Meanwhile, those other threads were still in the first "function" call and hadn't
//   yet processed if (index == -1). They now run that line. And see index is now -1. That's bad.
// So to prevent this scenario, we have one more __syncthreads listed immediately below.
__syncthreads();  //sync before get

// Total number of threads in this block; each thread scans a strided subset of d_varDB.
// NOTE(review): stored in a short, so this assumes blockDim.x*y*z <= 32767 -- confirm callers.
short numThreads = blockDim.x * blockDim.y * blockDim.z;
//int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
// Flat thread ID within the block; doubles as the starting index into d_varDB.
int i = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;  //threadID in the block
//int threadID = i;

//if (d_debug && threadID == 0 && blockID == 0) {
//  printf("device getting item \"%s\" from GPUDW %p", label, this);
//  printf("size (%d vars)\n Available labels:", d_numVarDBItems);
//}

//Have every thread try to find the label/patchId/matlIndx is a match in
//array.  This is a parallel approach so that instead of doing a simple
//sequential search with one thread, we can let every thread search for it.  Only the
//winning thread gets to write to shared data.
__shared__ int index;
// Every thread writes the same value here, so the unsynchronized store is benign.
index = -1;
__syncthreads();  //sync before get, making sure everyone set index to -1

while(i<d_numVarDBItems){
  short strmatch=0;
  char const *s1 = label; //reset s1 and s2 back to the start
  char const *s2 = &(d_varDB[i].label[0]);

  //a one-line strcmp.  This should keep branching down to a minimum.
  // strmatch == 0 on exit iff the two strings are identical.
  while (!(strmatch = *(unsigned char *) s1 - *(unsigned char *) s2) && *s1++ && *s2++);

  //only one thread will ever match this.
  //And nobody on the device side should ever access "staging" variables.
if (strmatch == 0) { if (patchID ==-99999999 //Only getLevel calls should hit this (note, && d_varDB[i].matlIndx == matlIndx && d_varDB[i].levelIndx == levelIndx && d_varDB[i].varItem.staging == false /* we don't support staging/foregin vars for get() */ && d_varDB[i].ghostItem.dest_varDB_index == -1) { /*don't let ghost cell copy data mix in with normal variables for get() */ index = i; //we found it. } else if(d_varDB[i].domainID == patchID && d_varDB[i].matlIndx == matlIndx /*&& d_varDB[i].levelIndx == levelIndx*/ //No need for level lookups, label + patchID + matl is a unique tuple. && d_varDB[i].varItem.staging == false && d_varDB[i].ghostItem.dest_varDB_index == -1) { index = i; //we found it. //printf("I'm thread %d In DW at %p, We found it for var %s patch %d matl %d level %d. d_varDB has it at index %d var %s patch %d at its item address %p with var pointer %p\n", // threadID, this, label, patchID, matlIndx, levelIndx, index, &(d_varDB[index].label[0]), d_varDB[index].domainID, &d_varDB[index], d_varDB[index].var_ptr); } } i = i + numThreads; //Since every thread is involved in searching for the string, have this thread loop to the next possible item to check for. } //sync before return; __syncthreads(); if (index == -1) { printf("ERROR:\nGPUDataWarehouse::getItem() didn't find anything for %s patch %d matl %d\n", label, patchID, matlIndx); return nullptr; } return &d_varDB[index]; } //______________________________________________________________________ // __host__ bool GPUDataWarehouse::remove(char const* label, int patchID, int matlIndx, int levelIndx) { /* //This is more of a stub. Remove hasn't been needed up until yet. If removing is needed, it //would likely be best to deallocate things but leave an entry in the collection. 
bool retVal = false;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
varLock->lock();
if (varPointers->find(lpml) != varPointers->end()) {
  int i = varPointers->at(lpml).varDB_index;
  d_varDB[i].label[0] = '\0'; //leave a hole in the flat array, not deleted.
  varPointers->erase(lpml);  //TODO: GPU Memory leak?
  retVal = true;
  d_dirty=true;
}
if (d_debug){
  printf("GPUDataWarehouse::remove( %s ). Removed a variable for label %s patch %d matl %d level %d \n", label, label, patchID, matlIndx, levelIndx);
}
varLock->unlock();
return retVal;
*/
return false;
}

//______________________________________________________________________
//
// Initialize the host-side state of this data warehouse: device ID, debug
// name, and fresh (empty) bookkeeping maps and locks.  Also called from
// clear() to reset the object after all device memory has been released.
__host__ void
GPUDataWarehouse::init(int id, std::string internalName)
{
  d_device_id = id;
  //this->_internalName = new std::string(internalName);
  // Copy the debug name.  strncpy() does NOT null-terminate when the source
  // is at least as long as the count, so terminate explicitly -- _internalName
  // is later streamed into gpu_stats and must be a valid C string.
  strncpy(_internalName, internalName.c_str(), sizeof(_internalName) - 1);
  _internalName[sizeof(_internalName) - 1] = '\0';
  objectSizeInBytes = 0;
  d_maxdVarDBItems = 0;
  //this->placementNewBuffer = placementNewBuffer;
  allocateLock = new Uintah::MasterLock{};
  varLock = new Uintah::MasterLock{};
  varPointers = new std::map<labelPatchMatlLevel, allVarPointersInfo>;
  contiguousArrays = new std::map<std::string, contiguousArrayInfo>;
  //other data members are initialized in the constructor
  d_numVarDBItems = 0;
  d_numMaterials = 0;
  d_debug = false;
  //d_numGhostCells = 0;
  d_device_copy = nullptr;
  d_dirty = true;
  //resetdVarDB();
  numGhostCellCopiesNeeded = 0;
}

//______________________________________________________________________
//
// Release the host-side locks and maps allocated by init().  Pointers are
// nulled afterwards so an accidental second cleanup() cannot double-delete.
__host__ void
GPUDataWarehouse::cleanup()
{
  delete allocateLock;      allocateLock = nullptr;
  delete varLock;           varLock = nullptr;
  delete varPointers;       varPointers = nullptr;
  delete contiguousArrays;  contiguousArrays = nullptr;
}

//______________________________________________________________________
//
// Allocate the device-side mirror of this (task) data warehouse from the GPU
// memory pool.  The host object is copied onto it later by syncto_device().
__host__ void
GPUDataWarehouse::init_device(size_t objectSizeInBytes, unsigned int d_maxdVarDBItems)
{
  this->objectSizeInBytes = objectSizeInBytes;
  this->d_maxdVarDBItems = d_maxdVarDBItems;
  OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
  void* temp = nullptr;
  //CUDA_RT_SAFE_CALL(hipMalloc(&temp, objectSizeInBytes));
  temp = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, objectSizeInBytes);
  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
          << " GPUDataWarehouse::init_device() -"
          << " requested GPU space from GPUMemoryPool::allocateCudaSpaceFromPool for Task DW of size "
          << objectSizeInBytes
          << " bytes at " << temp
          << " on device " << d_device_id
          << " the host GPUDW is at " << this
          << std::endl;
    }
    cerrLock.unlock();
  }
  d_device_copy = (GPUDataWarehouse*)temp;
  //hipHostRegister(this, sizeof(GPUDataWarehouse), hipHostRegisterPortable);
  d_dirty = true;
}

//______________________________________________________________________
//
// Copy this host-side data warehouse object onto its device-side mirror
// (asynchronously, on the caller's stream) if anything has changed.
__host__ void
GPUDataWarehouse::syncto_device(void *cuda_stream)
{
  if (!d_device_copy) {
    printf("ERROR:\nGPUDataWarehouse::syncto_device()\nNo device copy\n");
    exit(-1);
  }
  varLock->lock();

  if (d_dirty){
    OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
    //Even though this is in a writeLock state on the CPU, the nature of multiple threads
    //each with their own stream copying to a GPU means that one stream might seemingly go out
    //of order. This is ok for two reasons. 1) Nothing should ever be *removed* from a gpu data warehouse
    //2) Therefore, it doesn't matter if streams go out of order, each thread will still ensure it copies
    //exactly what it needs. Other streams may write additional data to the gpu data warehouse, but cpu
    //threads will only access their own data, not data copied in by other cpu threada via streams.

    //This approach does NOT require CUDA pinned memory.
//unsigned int sizeToCopy = sizeof(GPUDataWarehouse);
    hipStream_t* stream = (hipStream_t*)(cuda_stream);

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
            << " GPUDataWarehouse::syncto_device() - hipMemcpy -"
            << " sync GPUDW at " << d_device_copy
            << " with description " << _internalName
            << " to device " << d_device_id
            << " on stream " << stream
            << std::endl;
      }
      cerrLock.unlock();
    }
    // Asynchronous host-to-device copy of this object (only objectSizeInBytes
    // bytes, not sizeof(GPUDataWarehouse)) on the caller-supplied stream.
    CUDA_RT_SAFE_CALL (hipMemcpyAsync( d_device_copy, this, objectSizeInBytes, hipMemcpyHostToDevice, *stream));
    d_dirty=false;
  }
  varLock->unlock();
}

//______________________________________________________________________
//
// Deallocate every device-side variable tracked by this data warehouse --
// staging vars first, then the regular vars -- returning each buffer to the
// GPU memory pool, then reset the host-side bookkeeping via init().
__host__ void
GPUDataWarehouse::clear()
{
  OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
  varLock->lock();

  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator varIter;
  for (varIter = varPointers->begin(); varIter != varPointers->end(); ++varIter) {

    // clear out all the staging vars, if any
    std::map<stagingVar, stagingVarInfo>::iterator stagingIter;
    for (stagingIter = varIter->second.var->stagingVars.begin(); stagingIter != varIter->second.var->stagingVars.end(); ++stagingIter) {
      // compareAndSwapDeallocating() arbitrates which thread gets to free it.
      if (compareAndSwapDeallocating(stagingIter->second.atomicStatusInGpuMemory)) {
        //The counter hit zero, so lets deallocate the var.
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::clear() -"
                << " calling GPUMemoryPool::freeCudaSpaceFromPool() for staging var for "
                << varIter->first.label
                << " at device ptr " << stagingIter->second.device_ptr
                << " on device " << d_device_id
                << std::endl;
          }
          cerrLock.unlock();
        }

        if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, stagingIter->second.device_ptr) ) {
          stagingIter->second.device_ptr = nullptr;
          // Mark the status as fully deallocated now that the pool took it back.
          compareAndSwapDeallocate(stagingIter->second.atomicStatusInGpuMemory);
        } else {
          printf("ERROR:\nGPUDataWarehouse::clear(), for a staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", stagingIter->second.device_ptr);
          varLock->unlock();
          exit(-1);
        }
      }
    }
    varIter->second.var->stagingVars.clear();

    // clear out the regular vars

    // See if it's a placeholder var for staging vars.  This happens if the non-staging var
    // had a device_ptr of nullptr, and it was only in the varPointers map to only hold staging vars
    if (compareAndSwapDeallocating(varIter->second.var->atomicStatusInGpuMemory)) {
      if (varIter->second.var->device_ptr) {
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::clear() -"
                << " calling GPUMemoryPool::freeCudaSpaceFromPool() for non-staging var for "
                << varIter->first.label
                << " at device ptr " << varIter->second.var->device_ptr
                << " on device " << d_device_id
                << std::endl;
          }
          cerrLock.unlock();
        }
        if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, varIter->second.var->device_ptr)) {
          varIter->second.var->device_ptr = nullptr;
          compareAndSwapDeallocate(varIter->second.var->atomicStatusInGpuMemory);
        } else {
          printf("ERROR:\nGPUDataWarehouse::clear(), for a non-staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", varIter->second.var->device_ptr);
          varLock->unlock();
          exit(-1);
        }
      }
    }
  }
  varPointers->clear();

  varLock->unlock();

  // Reset all host-side bookkeeping (fresh maps/locks) for potential reuse.
  init(d_device_id, _internalName);
} //______________________________________________________________________ // __host__ void GPUDataWarehouse::deleteSelfOnDevice() { if ( d_device_copy ) { OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id ); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::deleteSelfOnDevice - calling GPUMemoryPool::freeCudaSpaceFromPool for Task DW at " << std::hex << d_device_copy << " on device " << std::dec << d_device_id << std::endl; } cerrLock.unlock(); } GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, d_device_copy); } } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::resetdVarDB() { #ifdef __CUDA_ARCH__ //no meaning in device method #else if (d_device_copy != nullptr) { //TODO: When TaskDWs are removed, this section shouldn't be needed as there won't be concurrency problems //This is designed to help stop tricky race scenarios. One such scenario I encountered was as follows: //Thread A would call getItem() on the GPU, and look thruogh d_varDB for a matching label/patch/matl tuple //Thread B would have previously added a new item to the d_varDB, then called syncto_device. //Thread B would be partway through updating d_varDB on the GPU. It would increase the number of items by one //And it would write the label. But it wouldn't yet write the patch or matl part of the tuple. By coincidence //the old garbage data in the GPU would have exactly the patch and matl that matches thread A's query //For a very brief window, there would be 2 tuples matching that label/patch/matl pair in d_varDB because //thread B hasn't fully written in all of his data. //Thread A's getItem() would run exactly in this brief window, find the wrong match, and use the wrong //memory address, and the program would crash with an invalid address. //The answer is to initialize d_varDB to items that should never provide an accidental match. 
//This should also occur for all other arrays. //TODO: Should this be could be cleaned up to only reset as much as was used. for (int i = 0; i < MAX_VARDB_ITEMS; i++) { d_varDB[i].label[0] = '\0'; d_varDB[i].domainID = -1; d_varDB[i].matlIndx = -1; //d_varDB[i].staging = false; d_varDB[i].var_ptr = nullptr; d_varDB[i].ghostItem.dest_varDB_index = -1; } for (int i = 0; i < MAX_LEVELDB_ITEMS; i++) { d_levelDB[i].label[0] = '\0'; d_levelDB[i].domainID = -1; d_levelDB[i].matlIndx = -1; //d_varDB[i].staging = false; d_levelDB[i].var_ptr = nullptr; } for (int i = 0; i < MAX_MATERIALSDB_ITEMS; i++) { d_materialDB[i].simulationType[0] = '\0'; } } #endif } //______________________________________________________________________ //These material methods below needs more work. They haven't been tested. __host__ void GPUDataWarehouse::putMaterials( std::vector< std::string > materials) { varLock->lock(); //see if a thread has already supplied this datawarehouse with the material data int numMaterials = materials.size(); if (d_numMaterials != numMaterials) { //nobody has given us this material data yet, so lets add it in from the beginning. if (numMaterials > MAX_MATERIALSDB_ITEMS) { printf("ERROR: out of GPUDataWarehouse space for materials"); exit(-1); } for (int i = 0; i < numMaterials; i++) { if (strcmp(materials.at(i).c_str(), "ideal_gas") == 0) { d_materialDB[i].material = IDEAL_GAS; } else { printf("ERROR: This material has not yet been coded for GPU support\n."); exit(-1); } } d_numMaterials = numMaterials; } varLock->unlock(); } //______________________________________________________________________ // HOST_DEVICE int GPUDataWarehouse::getNumMaterials() const { #ifdef __CUDA_ARCH__ return d_numMaterials; #else //I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side. 
return -1; #endif } //______________________________________________________________________ // HOST_DEVICE materialType GPUDataWarehouse::getMaterial(int i) const { #ifdef __CUDA_ARCH__ if (i >= d_numMaterials) { printf("ERROR: Attempting to access material past bounds\n"); assert(0); } return d_materialDB[i].material; #else //I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side. printf("getMaterial() is only implemented as a GPU function"); return IDEAL_GAS; //returning something to prevent a compiler error #endif } //______________________________________________________________________ //TODO: This is too slow. It needs work. __device__ void GPUDataWarehouse::copyGpuGhostCellsToGpuVars() { //Copy all ghost cells from their source to their destination. //The ghost cells could either be only the data that needs to be copied, //or it could be on an edge of a bigger grid var. //I believe the x,y,z coordinates of everything should match. //This could probably be made more efficient by using only perhaps one block, //copying float 4s, and doing it with instruction level parallelism. 
// Work distribution: the whole launch (all blocks) strides over every cell of
// every ghost-cell entry; the stride is totalThreads (see assignedCellID += below).
int numThreads = blockDim.x*blockDim.y*blockDim.z;
int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block
int totalThreads = numThreads * gridDim.x * gridDim.y * gridDim.z;
int assignedCellID;
//go through every ghost cell var we need
for (int i = 0; i < d_numVarDBItems; i++) {
  //some things in d_varDB are meta data for simulation variables
  //other things in d_varDB are meta data for how to copy ghost cells.
  //Make sure we're only dealing with ghost cells here
  if(d_varDB[i].ghostItem.dest_varDB_index != -1) {
    assignedCellID = blockID * numThreads + threadID;
    int destIndex = d_varDB[i].ghostItem.dest_varDB_index;

    // Extent of the shared (ghost) region in cells along each axis.
    int3 ghostCellSize;
    ghostCellSize.x = d_varDB[i].ghostItem.sharedHighCoordinates.x - d_varDB[i].ghostItem.sharedLowCoordinates.x;
    ghostCellSize.y = d_varDB[i].ghostItem.sharedHighCoordinates.y - d_varDB[i].ghostItem.sharedLowCoordinates.y;
    ghostCellSize.z = d_varDB[i].ghostItem.sharedHighCoordinates.z - d_varDB[i].ghostItem.sharedLowCoordinates.z;

    //while there's still work to do (this assigned ID is still within the ghost cell)
    while (assignedCellID < ghostCellSize.x * ghostCellSize.y * ghostCellSize.z ) {
      // Unflatten the linear cell ID into (x, y, z) within the ghost region.
      int z = assignedCellID / (ghostCellSize.x * ghostCellSize.y);
      int temp = assignedCellID % (ghostCellSize.x * ghostCellSize.y);
      int y = temp / ghostCellSize.x;
      int x = temp % ghostCellSize.x;

      assignedCellID += totalThreads;

      //if we're in a valid x,y,z space for the variable.  (It's unlikely every cell will perfectly map onto every available thread.)
      if (x < ghostCellSize.x && y < ghostCellSize.y && z < ghostCellSize.z) {

        //offset them to their true array coordinates, not relative simulation cell coordinates
        //When using virtual addresses, the virtual offset is always applied to the source, but the destination is correct.
        int x_source_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x - d_varDB[i].var_offset.x;
        int y_source_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y - d_varDB[i].var_offset.y;
        int z_source_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z - d_varDB[i].var_offset.z;
        //count over array slots.
        int sourceOffset = x_source_real + d_varDB[i].var_size.x * (y_source_real + z_source_real * d_varDB[i].var_size.y);

        int x_dest_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[destIndex].var_offset.x;
        int y_dest_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[destIndex].var_offset.y;
        int z_dest_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[destIndex].var_offset.z;
        int destOffset = x_dest_real + d_varDB[destIndex].var_size.x * (y_dest_real + z_dest_real * d_varDB[destIndex].var_size.y);

        //copy all 8 bytes of a double in one shot
        if (d_varDB[i].sizeOfDataType == sizeof(double)) {
          *((double*)(d_varDB[destIndex].var_ptr) + destOffset) = *((double*)(d_varDB[i].var_ptr) + sourceOffset);
          //Note: Every now and then I've seen the (removed) debug printf here get confused,
          //a line printing with the wrong variables/offset variables...
        }
        //or copy all 4 bytes of an int in one shot.
        else if (d_varDB[i].sizeOfDataType == sizeof(int)) {
          *(((int*)d_varDB[destIndex].var_ptr) + destOffset) = *((int*)(d_varDB[i].var_ptr) + sourceOffset);
        }
        //Copy each byte until we've copied all for this data type.
        else {
          for (int j = 0; j < d_varDB[i].sizeOfDataType; j++) {
            *(((char*)d_varDB[destIndex].var_ptr) + (destOffset * d_varDB[destIndex].sizeOfDataType + j))
                = *(((char*)d_varDB[i].var_ptr) + (sourceOffset * d_varDB[i].sizeOfDataType + j));
          }
        }
      }
    }
  }
}
}

//______________________________________________________________________
// Trampoline kernel: runs the device-side member function above on the
// device copy of the data warehouse.
__global__ void copyGpuGhostCellsToGpuVarsKernel( GPUDataWarehouse *gpudw) {
  gpudw->copyGpuGhostCellsToGpuVars();
}

//______________________________________________________________________
// Host-side launcher for the ghost-cell copy kernel (asynchronous on `stream`).
__host__ void
GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker(hipStream_t* stream)
{
  //see if this GPU datawarehouse has ghost cells in it.
  if (numGhostCellCopiesNeeded > 0) {
    //call a kernel which gets the copy process started.
    OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);

#if 0 // compiler warnings
    const int BLOCKSIZE = 1;
    int xblocks = 32;
    int yblocks = 1;
    int zblocks = 1;
#endif
    dim3 dimBlock(32, 16, 1);
    dim3 dimGrid(1, 1, 1);
    //Give each ghost copying kernel 32 * 16 = 512 threads to copy
    //(32x32 was too much for a smaller laptop GPU, but was fine for the Titan X on Albion)
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker() - "
                  << " Launching ghost cell copies kernel"
                  << " on device " << d_device_id
                  << " at GPUDW at " << std::hex << this << std::dec
                  << " with description " << _internalName
                  << std::endl;
      }
      cerrLock.unlock();
    }
    hipLaunchKernelGGL(( copyGpuGhostCellsToGpuVarsKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream , this->d_device_copy);
  }
}

//______________________________________________________________________
// Returns true when at least one internal GPU->GPU ghost-cell copy is pending.
__host__ bool
GPUDataWarehouse::ghostCellCopiesNeeded()
{
  //see if this GPU datawarehouse has ghost cells in it.
return (numGhostCellCopiesNeeded > 0); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::putGhostCell(char const* label, int sourcePatchID, int destPatchID, int matlIndx, int levelIndx, bool sourceStaging, bool destStaging, int3 varOffset, int3 varSize, int3 sharedLowCoordinates, int3 sharedHighCoordinates, int3 virtualOffset) { //Add information describing a ghost cell that needs to be copied internally from //one chunk of data to the destination. This covers a GPU -> same GPU copy scenario. varLock->lock(); unsigned int i = d_numVarDBItems; if (i > d_maxdVarDBItems) { printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Exceeded maximum d_varDB entries. Index is %d and max items is %d\n", label, i, d_maxdVarDBItems); varLock->unlock(); exit(-1); } int index = -1; d_numVarDBItems++; numGhostCellCopiesNeeded++; d_varDB[i].ghostItem.sharedLowCoordinates = sharedLowCoordinates; d_varDB[i].ghostItem.sharedHighCoordinates = sharedHighCoordinates; d_varDB[i].ghostItem.virtualOffset = virtualOffset; //look up the source index and the destination index for these. //it may be an entire variable (in which case staging is false) //or it may be a staging variable. labelPatchMatlLevel lpml_source(label, sourcePatchID, matlIndx, levelIndx); if (!sourceStaging) { if (varPointers->find(lpml_source) != varPointers->end()) { index = varPointers->at(lpml_source).varDB_index; } } else { //Find the variable that contains the region in which our ghost cells exist. //Usually the sharedLowCoordinates and sharedHighCoordinates correspond //exactly to the size of the staging variable. //(TODO ? But sometimes the ghost data is found within larger staging variable. 
Not sure if there is a use case for this yet) stagingVar sv; sv.device_offset = varOffset; sv.device_size = varSize; std::map<stagingVar, stagingVarInfo>::iterator staging_it = varPointers->at(lpml_source).var->stagingVars.find(sv); if (staging_it != varPointers->at(lpml_source).var->stagingVars.end()) { index = staging_it->second.varDB_index; } else { int nStageVars = varPointers->at(lpml_source).var->stagingVars.size(); printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Number of staging vars for this var: %d, No staging variable found exactly matching all of the following: label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d) on DW at %p.\n", label, nStageVars, label, sourcePatchID, matlIndx, levelIndx, sv.device_offset.x, sv.device_offset.y, sv.device_offset.z, sv.device_size.x, sv.device_size.y, sv.device_size.z, this); varLock->unlock(); exit(-1); } //Find the d_varDB entry for this specific one. } if (index < 0) { printf("ERROR:\nGPUDataWarehouse::putGhostCell, label %s, source patch ID %d, matlIndx %d, levelIndex %d staging %s not found in GPU DW %p\n", label, sourcePatchID, matlIndx, levelIndx, sourceStaging ? 
"true" : "false", this); varLock->unlock(); exit(-1); } d_varDB[i].var_offset = d_varDB[index].var_offset; d_varDB[i].var_size = d_varDB[index].var_size; d_varDB[i].var_ptr = d_varDB[index].var_ptr; d_varDB[i].sizeOfDataType = d_varDB[index].sizeOfDataType; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::putGhostCell() - " << " Placed into d_varDB at index " << i << " of max index " << d_maxdVarDBItems - 1 << " from patch " << sourcePatchID << " staging " << sourceStaging << " to patch " << destPatchID << " staging " << destStaging << " has shared coordinates (" << sharedLowCoordinates.x << ", " << sharedLowCoordinates.y << ", " << sharedLowCoordinates.z << ")," << " (" << sharedHighCoordinates.x << ", " << sharedHighCoordinates.y << ", " << sharedHighCoordinates.z << "), " << " from low/offset (" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << ") " << " size (" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << ") " << " virtualOffset (" << d_varDB[i].ghostItem.virtualOffset.x << ", " << d_varDB[i].ghostItem.virtualOffset.y << ", " << d_varDB[i].ghostItem.virtualOffset.z << ") " << " datatype size " << d_varDB[i].sizeOfDataType << " on device " << d_device_id << " at GPUDW at " << std::hex << this<< std::dec << std::endl; } cerrLock.unlock(); } // Find where we are sending the ghost cell data to labelPatchMatlLevel lpml_dest(label, destPatchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml_dest); if (it != varPointers->end()) { if (destStaging) { //TODO: Do the same thing as the source. //If the destination is staging, then the shared coordinates are also the ghost coordinates. 
stagingVar sv; sv.device_offset = sharedLowCoordinates; sv.device_size = make_int3(sharedHighCoordinates.x-sharedLowCoordinates.x, sharedHighCoordinates.y-sharedLowCoordinates.y, sharedHighCoordinates.z-sharedLowCoordinates.z); std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv); if (staging_it != it->second.var->stagingVars.end()) { d_varDB[i].ghostItem.dest_varDB_index = staging_it->second.varDB_index; } else { printf("\nERROR:\nGPUDataWarehouse::putGhostCell() didn't find a staging variable from the device for offset (%d, %d, %d) and size (%d, %d, %d).\n", sharedLowCoordinates.x, sharedLowCoordinates.y, sharedLowCoordinates.z, sv.device_size.x, sv.device_size.y, sv.device_size.z); varLock->unlock(); exit(-1); } } else { d_varDB[i].ghostItem.dest_varDB_index = it->second.varDB_index; } } else { printf("ERROR:\nGPUDataWarehouse::putGhostCell(), label: %s destination patch ID %d, matlIndx %d, levelIndex %d, staging %s not found in GPU DW variable database\n", label, destPatchID, matlIndx, levelIndx, destStaging ? 
"true" : "false"); varLock->unlock(); exit(-1); } d_dirty=true; varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::getSizes(int3& low, int3& high, int3& siz, GhostType& gtype, int& numGhostCells, char const* label, int patchID, int matlIndx, int levelIndx) { varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo info = varPointers->at(lpml); low = info.device_offset; high.x = info.var->device_size.x + info.var->device_offset.x; high.y = info.var->device_size.y + info.var->device_offset.y; high.z = info.var->device_size.z + info.var->device_offset.z; siz = info.var->device_size; gtype = info.var->gtype; numGhostCells = info.var->numGhostCells; } varLock->unlock(); } //______________________________________________________________________ //Deep copies (not shallow copies or moves) an entry from one data warehouse to another. //(Note: A deep copy is a full copy of data from one variable's memory space to another variable's memory space //A shallow copy is just a pointer copy and a ref counting //A move is a true std::move() reseating.) //RMCRT and Arches often keep a variable in the old data warehouse alive by copying it to the new data warehouse. //It can't be a move (it may be needed to use data from the old and the new) //It can't be a shallow copy (it may be needed to modify the new and also use the old) //So it must be a deep copy. //Both the source and destination variables must be in the GPU data warehouse, //both must be listed as "allocated". If these are not the case, the transferFrom doesn't proceed. //Both must have the same variable sizes. If this is not the case, the program will exit. //If all above conditions are met, then it will do a device to device memcopy call. //*Important*: For this to work, it needs a GPU stream. 
GPU streams are stored per task, every Uintah task is assigned //a possible stream to use. To get the stream you have to request it from the detailedTask object. //Normal CPU task callback functions do not have access to the detailedTask object, but it is possible to //extend the callack function parameter list so that it does. See UnifiedSchedulerTest::timeAdvanceUnified as an example. //*Also important*: For this to work, the destination variable *MUST* be listed as a computes in the task that's //calling transferFrom(). That allows for the computes data to have been preallocated ahead of time by the scheduler. //Uintah's scheduler is fine if it is able to allocate the space, so that it can allow the task developer to write data //into space it created. If it was a computes, then this method can copy data into the computes memory, and //when the task which called transferFrom is done, the scheduler will mark this computes variable as VALID. //Note: A shallow copy method has been requested by the Arches team. That hasn't been implemented yet. It would require //ref counting a variable, and perhaps some sanity checks to ensure a shallow copied variable is not called a computes and //then later listed as a modifies. __host__ bool GPUDataWarehouse::transferFrom(hipStream_t* stream, GPUGridVariableBase &var_source, GPUGridVariableBase &var_dest, GPUDataWarehouse * from, char const* label, int patchID, int matlIndx, int levelIndx){ from->varLock->lock(); this->varLock->lock(); //lock both data warehouses, no way to lock free this section, //you could get the dining philosophers problem. 
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator source_it = from->varPointers->find(lpml); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator dest_it = this->varPointers->find(lpml); int proceed = true; if (source_it == from->varPointers->end()) { //It may just be there wasn't any requires in the GPU to begin with, so don't bother attempting to copy. //printf("GPU source not found\n"); proceed = false; } else if (dest_it == this->varPointers->end()) { //It may just be there wasn't any computes in the GPU to begin with, so don't bother attempting to copy. //printf("GPU dest not found in DW at %p for variable %s patch %d matl %d level %d\n", this, label, patchID, matlIndx, levelIndx); proceed = false; } else if (((__sync_fetch_and_or(&(source_it->second.var->atomicStatusInGpuMemory), 0) & ALLOCATED) != ALLOCATED)){ //It may just be there wasn't any computes in the GPU to begin with, so don't bother attempting to copy. //printf("GPU source not allocated for variable %s patch %d matl %d level %d, it has status codes %s\n", label, patchID, matlIndx, levelIndx, getDisplayableStatusCodes(source_it->second.atomicStatusInGpuMemory).c_str()); proceed = false; //Is this a problem? We know of this variable in the data warehouse, but we have no space for it. //printf("Error: GPUDataWarehouse::transferFrom() - No source variable device space found. Cannot proceed with deep copy. Exiting...\n"); //exit(-1); } else if (((__sync_fetch_and_or(&(dest_it->second.var->atomicStatusInGpuMemory), 0) & ALLOCATED) != ALLOCATED)){ //printf("GPU destination not allocated for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx); //It may just be there wasn't any computes in the GPU to begin with, so don't bother attempting to copy. proceed = false; //Is a problem? We know of this variable in the data warehouse, but we have no space for it. 
//printf("Error: GPUDataWarehouse::transferFrom() - No destination variable device space found. Cannot proceed with deep copy. Exiting...\n"); //exit(-1); } if (!proceed) { from->varLock->unlock(); this->varLock->unlock(); return false; } if (!( source_it->second.var->device_offset.x == dest_it->second.var->device_offset.x && source_it->second.var->device_offset.y == dest_it->second.var->device_offset.y && source_it->second.var->device_offset.z == dest_it->second.var->device_offset.z && source_it->second.var->device_size.x == dest_it->second.var->device_size.x && source_it->second.var->device_size.y == dest_it->second.var->device_size.y && source_it->second.var->device_size.z == dest_it->second.var->device_size.z )) { printf("Error: GPUDataWarehouse::transferFrom() - The source and destination variables exists for variable %s patch %d matl %d level %d, but the sizes don't match. Cannot proceed with deep copy. Exiting...\n", label, patchID, matlIndx, levelIndx); printf("The source size is (%d, %d, %d) with offset (%d, %d, %d) and device size is (%d, %d, %d) with offset (%d, %d, %d)\n", source_it->second.var->device_size.x, source_it->second.var->device_size.y, source_it->second.var->device_size.z, source_it->second.var->device_offset.x, source_it->second.var->device_offset.y, source_it->second.var->device_offset.z, dest_it->second.var->device_size.x, dest_it->second.var->device_size.y, dest_it->second.var->device_size.z, dest_it->second.var->device_offset.x, dest_it->second.var->device_offset.y, dest_it->second.var->device_offset.z); from->varLock->unlock(); this->varLock->unlock(); exit(-1); } else if (!(source_it->second.var->device_ptr)) { //A couple more santiy checks, this may be overkill... 
printf("Error: GPUDataWarehouse::transferFrom() - No source variable pointer found for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx); from->varLock->unlock(); this->varLock->unlock(); exit(-1); } else if (!(dest_it->second.var->device_ptr)) { printf("Error: GPUDataWarehouse::transferFrom() - No destination variable pointer found for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx); from->varLock->unlock(); this->varLock->unlock(); exit(-1); } else if (!stream) { printf("ERROR: No stream associated with the detailed task. Cannot proceed with deep copy. Exiting...\n"); printf("If you get this message, the fix is not that rough. You need to change your CPU callback function to having the full set of parameters common for a GPU task. If you do that, the engine should pick up the rest of the details.\n"); from->varLock->unlock(); this->varLock->unlock(); exit(-1); } //We shouldn't need to allocate space on either the source or the datination. The source should have been listed as a requires, //and the destination should have been listed as a computes for the task. //And this solves a mess of problems, mainly deailing with when it is listed as allocated and when it's listed as valid. var_source.setArray3(source_it->second.var->device_offset, source_it->second.var->device_size, source_it->second.var->device_ptr); var_source.setArray3(dest_it->second.var->device_offset, dest_it->second.var->device_size, dest_it->second.var->device_ptr); hipMemcpyAsync(dest_it->second.var->device_ptr, source_it->second.var->device_ptr, source_it->second.var->device_size.x * source_it->second.var->device_size.y * source_it->second.var->device_size.z * source_it->second.var->sizeOfDataType, hipMemcpyDeviceToDevice, *stream); from->varLock->unlock(); this->varLock->unlock(); //Let the caller know we found and transferred something. 
return true; } //______________________________________________________________________ // Go through all staging vars for a var. See if they are all marked as valid. __host__ bool GPUDataWarehouse::areAllStagingVarsValid(char const* label, int patchID, int matlIndx, int levelIndx) { varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { for (std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.begin(); staging_it != it->second.var->stagingVars.end(); ++staging_it) { if (!checkValid(staging_it->second.atomicStatusInGpuMemory)) { varLock->unlock(); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::areAllStagingVarsValid() -" // Task: " << dtask->getName() << " Not all staging vars were ready for " << label << " patch " << patchID << " material " << matlIndx << " level " << levelIndx << " offset (" << staging_it->first.device_offset.x << ", " << staging_it->first.device_offset.y << ", " << staging_it->first.device_offset.z << ") and size (" << staging_it->first.device_size.x << ", " << staging_it->first.device_size.y << ", " << staging_it->first.device_size.z << ") with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory) << std::endl; } cerrLock.unlock(); } return false; } } } varLock->unlock(); return true; } //______________________________________________________________________ // Simply performs an atomic fetch on the status variable. 
//typedef int atomicDataStatus;
//__host__ atomicDataStatus
//GPUDataWarehouse::getStatus(atomicDataStatus& status) {
//  return __sync_or_and_fetch(&(status), 0);
//}

//______________________________________________________________________
// Builds a human-readable, space-separated list of the status-flag names set
// in the given atomicDataStatus word (e.g. "Allocated Valid").  The word is
// read with an atomic fetch (OR with 0).  Used by the gpu_stats diagnostics.
__host__ std::string
GPUDataWarehouse::getDisplayableStatusCodes(atomicDataStatus& status)
{
  atomicDataStatus varStatus = __sync_or_and_fetch(&status, 0);
  std::string retval = "";
  if (varStatus == 0) {
    retval += "Unallocated ";
  } else {
    if ((varStatus & ALLOCATING) == ALLOCATING) {
      retval += "Allocating ";
    }
    if ((varStatus & ALLOCATED) == ALLOCATED) {
      retval += "Allocated ";
    }
    if ((varStatus & COPYING_IN) == COPYING_IN) {
      retval += "Copying-in ";
    }
    if ((varStatus & VALID) == VALID) {
      retval += "Valid ";
    }
    if ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) {
      retval += "Awaiting-ghost-copy ";
    }
    if ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
      retval += "Valid-with-ghosts ";
    }
    if ((varStatus & DEALLOCATING) == DEALLOCATING) {
      retval += "Deallocating ";
    }
    if ((varStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH) {
      retval += "Forming-superpatch ";
    }
    if ((varStatus & SUPERPATCH) == SUPERPATCH) {
      retval += "Superpatch ";
    }
    if ((varStatus & UNKNOWN) == UNKNOWN) {
      retval += "Unknown ";
    }
  }
  //trim trailing whitespace
  retval.erase(std::find_if(retval.rbegin(), retval.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), retval.end());
  return retval;
}

//______________________________________________________________________
// Reports, through the bool& out-parameters, the state of the named variable's
// GPU status word plus whether the stored device offset/size match the caller's
// expected offset/size.  If the entry is not found, all out-parameters are set
// to false (see NOTE below).
__host__ void
GPUDataWarehouse::getStatusFlagsForVariableOnGPU(bool& correctSize, bool& allocating, bool& allocated, bool& copyingIn,
                                                 bool& validOnGPU, bool& gatheringGhostCells, bool& validWithGhostCellsOnGPU,
                                                 bool& deallocating, bool& formingSuperPatch, bool& superPatch,
                                                 char const* label, const int patchID, const int matlIndx, const int levelIndx,
                                                 const int3& offset, const int3& size)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    //check the sizes
    allVarPointersInfo vp = varPointers->at(lpml);
    int3 device_offset = vp.var->device_offset;
    int3 device_size = vp.var->device_size;
    correctSize = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
                   && device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);

    //get the value (atomic fetch of the status word)
    atomicDataStatus varStatus = __sync_or_and_fetch(&(vp.var->atomicStatusInGpuMemory), 0);

    allocating               = ((varStatus & ALLOCATING) == ALLOCATING);
    allocated                = ((varStatus & ALLOCATED) == ALLOCATED);
    copyingIn                = ((varStatus & COPYING_IN) == COPYING_IN);
    validOnGPU               = ((varStatus & VALID) == VALID);
    gatheringGhostCells      = ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY);
    validWithGhostCellsOnGPU = ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS);
    deallocating             = ((varStatus & DEALLOCATING) == DEALLOCATING);
    formingSuperPatch        = ((varStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH);
    superPatch               = ((varStatus & SUPERPATCH) == SUPERPATCH);
  } else {
    // Entry not found: report everything false.
    // NOTE(review): 'deallocating' is NOT reset in this branch, so it is left
    // holding whatever the caller passed in -- looks like an oversight; every
    // other out-parameter is cleared here.
    correctSize              = false;
    allocating               = false;
    allocated                = false;
    copyingIn                = false;
    validOnGPU               = false;
    gatheringGhostCells      = false;
    validWithGhostCellsOnGPU = false;
    formingSuperPatch        = false;
    superPatch               = false;
  }
  varLock->unlock();
}

//______________________________________________________________________
// returns false if something else already allocated space and we don't have to.
// returns true if we are the ones to allocate the space.
// performs operations with atomic compare and swaps
__host__ bool
GPUDataWarehouse::compareAndSwapAllocating(atomicDataStatus& status)
{
  bool allocating = false;
  while (!allocating) {
    //get the value (atomic fetch)
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
    // The reference counter lives in the upper 16 bits of the status word.
    unsigned int refCounter = (oldVarStatus >> 16);

    //if it's already referenced, someone else owns the allocation
    if (refCounter >= 1 ) {
      //Something else already took care of it, and it has moved beyond the allocating state into something else.
      return false;
    } else if ((oldVarStatus & UNALLOCATED) != UNALLOCATED) {
      //Sanity check.  The ref counter was zero, but the variable isn't unallocated.  We can't have this.
      // NOTE(review): per the comment in compareAndSwapAllocate(), UNALLOCATED is
      // defined as all zero bits -- if so, (x & 0) != 0 can never be true and
      // this check never fires; confirm the flag's definition.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Something wrongly modified the atomic status while setting the allocated flag\n");
      exit(-1);
    } else {
      //Attempt to claim we'll allocate it.  If not go back into our loop and recheck
      short refCounter = 1;
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);  //Place in the reference counter and save the right 16 bits.
      newVarStatus = newVarStatus | ALLOCATING;  //It's possible to preserve a flag, such as copying in ghost cells.
      allocating = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// Sets the allocated flag on a variables atomicDataStatus
// This is called after an allocating process completes.  *Only* the thread that got a true from
// compareAndSwapAllocating() should immediately call this.
__host__ bool
GPUDataWarehouse::compareAndSwapAllocate(atomicDataStatus& status)
{
  bool allocated = false;

  //get the value (atomic fetch)
  atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
  if ((oldVarStatus & ALLOCATING) == 0) {
    //A sanity check
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Can't allocate a status if it wasn't previously marked as allocating.\n");
    exit(-1);
  } else if ((oldVarStatus & ALLOCATED) == ALLOCATED) {
    //A sanity check
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Can't allocate a status if it's already allocated\n");
    exit(-1);
  } else {
    //Attempt to claim we'll allocate it.  Create what we want the status to look like
    //by turning off allocating and turning on allocated.
    //Note: No need to turn off UNALLOCATED, it's defined as all zero bits.
    //But the below is kept in just for readability's sake.
    atomicDataStatus newVarStatus = oldVarStatus & ~UNALLOCATED;
    newVarStatus = newVarStatus & ~ALLOCATING;
    newVarStatus = newVarStatus | ALLOCATED;

    //If we succeeded in our attempt to claim to allocate, this returns true.
    //If we failed, thats a real problem, and we crash the problem below.
    allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
  }
  if (!allocated) {
    //Another sanity check
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Something wrongly modified the atomic status while setting the allocated flag\n");
    exit(-1);
  }
  return allocated;
}

//______________________________________________________________________
// Simply determines if a variable has been marked as allocated.
__host__ bool
GPUDataWarehouse::checkAllocated(atomicDataStatus& status)
{
  return ((__sync_or_and_fetch(&status, 0) & ALLOCATED) == ALLOCATED);
}

//______________________________________________________________________
// Decrements the reference count and, when this caller holds the last
// reference, claims the DEALLOCATING state.  Returns true only for the one
// caller that should actually perform the deallocation.
__host__ bool
GPUDataWarehouse::compareAndSwapDeallocating(atomicDataStatus& status)
{
  bool deallocating = false;
  while (!deallocating) {
    //get the value (atomic fetch)
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
    unsigned int refCounter = (oldVarStatus >> 16);
    if (refCounter == 0
        || ((oldVarStatus & DEALLOCATING) == DEALLOCATING)
        || ((oldVarStatus & 0xFFFF) == UNALLOCATED)
        || ((oldVarStatus & UNKNOWN) == UNKNOWN)) {
      //There's nothing to deallocate, or something else already deallocated it or is deallocating it.
      //So this thread won't do it.
      return false;
    } else if (refCounter == 1) {
      //Ref counter is 1, we can deallocate it.
      //Leave the refCounter at 1.
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);  //Place in the reference counter and save the right 16 bits.
      newVarStatus = newVarStatus | DEALLOCATING;  //Set it to deallocating so nobody else can attempt to use it
      bool successfulUpdate = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
      if (successfulUpdate) {
        //Need to deallocate, let the caller know it.
        deallocating = true;
      }
    } else if (refCounter > 1) {
      //Something else is using this variable, don't deallocate, just decrement the counter
      refCounter--;
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);
      bool successfulUpdate = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
      if (successfulUpdate) {
        //No need to deallocate, let the caller know it.
        return false;
      }
    } else {
      // Unreachable given the checks above; kept as a defensive sanity check.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocating( ) This variable's ref counter was 0, but its status said it was in use. This shouldn't happen\n");
      exit(-1);
    }
  }
  return true;
}

//______________________________________________________________________
// Sets the allocated flag on a variables atomicDataStatus
// This is called after a deallocating process completes.  *Only* the thread that got a true from
//compareAndSwapDeallocating() should immediately call this.
__host__ bool GPUDataWarehouse::compareAndSwapDeallocate(atomicDataStatus& status) { bool allocated = false; //get the value atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0); unsigned int refCounter = (oldVarStatus >> 16); if ((oldVarStatus & DEALLOCATING) == 0) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Can't deallocate a status if it wasn't previously marked as deallocating.\n"); exit(-1); } else if ((oldVarStatus & 0xFFFF) == UNALLOCATED) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Can't deallocate a status if it's already deallocated\n"); exit(-1); } else if (refCounter != 1) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Attemping to deallocate a variable but the ref counter isn't the required value of 1\n"); exit(-1); } else { //Attempt to claim we'll deallocate it. Create what we want the status to look like //by turning off all status flags (indicating unallocated), it should also zero out the reference counter. atomicDataStatus newVarStatus = UNALLOCATED; //If we succeeded in our attempt to claim to deallocate, this returns true. //If we failed, thats a real problem, and we crash the problem below. allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus); } if (!allocated) { //Another sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Something wrongly modified the atomic status while trying set the status flags to unallocated\n"); exit(-1); } return allocated; } //______________________________________________________________________ // Simply determines if a variable has been marked as valid. 
__host__ bool
GPUDataWarehouse::checkValid(atomicDataStatus& status)
{
  // Atomic fetch of the status word, test the VALID bit.
  return ((__sync_or_and_fetch(&status, 0) & VALID) == VALID);
}

//______________________________________________________________________
// Returns true if the named variable exists in this DW and its GPU status has
// the ALLOCATED bit set.
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Overload: additionally requires that the stored device offset/size match the
// caller's expected offset/size.
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    //cout << "In isAllocatedOnGPU - For patchID " << patchID << " for the status is " << getDisplayableStatusCodes(varPointers->at(lpml).atomicStatusInGpuMemory) << endl;
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
    if (retVal) {
      //now check the sizes
      int3 device_offset = varPointers->at(lpml).var->device_offset;
      int3 device_size = varPointers->at(lpml).var->device_size;
      retVal = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
                && device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);
    }
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Returns true if the named variable exists and its GPU status has the VALID
// bit set.
__host__ bool
GPUDataWarehouse::isValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & VALID) == VALID);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Atomically transitions the variable's GPU status from "copying in" to VALID.
// Returns true if this caller performed the transition, false if the variable
// was already VALID (someone else got there first).  Fatal if the variable is
// not in this DW.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnGPU(char const* const label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  bool settingValid = false;
  while (!settingValid) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      atomicDataStatus *status = &(it->second.var->atomicStatusInGpuMemory);
      atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
      if ((oldVarStatus & VALID) == VALID) {
        //Something else already took care of it.  So this task won't manage it.
        varLock->unlock();
        return false;
      } else {
        //Attempt to claim we'll manage the ghost cells for this variable.
        //If the claim fails go back into our loop and recheck
        atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
        newVarStatus = newVarStatus | VALID;
        settingValid = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
      }
    } else {
      varLock->unlock();
      printf("ERROR\nGPUDataWarehouse::compareAndSwapSetValidOnGPU() - Unknown variable %s on GPUDataWarehouse\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// Same as compareAndSwapSetValidOnGPU(), but for a specific staging buffer
// identified by its (offset, size) pair.  Fatal if either the variable or the
// staging buffer is not found.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
  varLock->lock();
  bool settingValidOnStaging = false;
  while (!settingValidOnStaging) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      stagingVar sv;
      sv.device_offset = offset;
      sv.device_size = size;
      std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv);
      if (staging_it != it->second.var->stagingVars.end()) {
        atomicDataStatus *status = &(staging_it->second.atomicStatusInGpuMemory);
        atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
        if ((oldVarStatus & VALID) == VALID) {
          //Something else already took care of it.  So this task won't manage it.
          varLock->unlock();
          return false;
        } else {
          //Attempt to claim we'll manage the ghost cells for this variable.
          //If the claim fails go back into our loop and recheck
          atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
          newVarStatus = newVarStatus | VALID;
          settingValidOnStaging = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
        }
      } else {
        varLock->unlock();
        printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetValidOnGPUStaging( ) Staging variable %s not found.\n", label);
        exit(-1);
      }
    } else {
      varLock->unlock();
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetValidOnGPUStaging( ) Variable %s not found.\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// We have an entry for this item in the GPU DW, and it's not unknown.  Therefore
// if this returns true it means this GPU DW specifically knows something about the
// state of this variable. (The reason for the unknown check is currently when a
// var is added to the GPUDW, we also need to state what we know about its data in
// host memory.  Since it doesn't know, it marks it as unknown, meaning, the host
// side DW is possibly managing the data.)
__host__ bool
GPUDataWarehouse::dwEntryExistsOnCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  bool retVal = false;
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    // NOTE(review): this reads the status word directly rather than through the
    // __sync_* atomic fetch used elsewhere in this file -- confirm whether a
    // plain read is intended here.
    if ((it->second.var->atomicStatusInHostMemory & UNKNOWN) != UNKNOWN) {
      retVal = true;
    }
  }
  varLock->unlock();
  return retVal;
}

//______________________________________________________________________
// Returns true if the named variable exists and its *host-side* status has the
// VALID bit set.
__host__ bool
GPUDataWarehouse::isValidOnCPU(char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInHostMemory), 0) & VALID) == VALID);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Atomically transitions the variable's *host-side* status from "copying in"
// to VALID.  Returns true if this caller performed the transition, false if it
// was already VALID.  Fatal if the variable is not in this DW.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnCPU(char const* const label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  bool settingValid = false;
  while (!settingValid) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      atomicDataStatus *status = &(it->second.var->atomicStatusInHostMemory);
      atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
      if ((oldVarStatus & VALID) == VALID) {
        //Something else already took care of it.  So this task won't manage it.
        varLock->unlock();
        return false;
      } else {
        //Attempt to claim we'll manage the ghost cells for this variable.
        //If the claim fails go back into our loop and recheck
        atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
        newVarStatus = newVarStatus | VALID;
        settingValid = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
      }
    } else {
      varLock->unlock();
      printf("ERROR\nGPUDataWarehouse::compareAndSwapSetValidOnCPU() - Unknown variable %s on GPUDataWarehouse\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// returns false if something else already changed a valid variable to valid awaiting ghost data
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  bool allocating = false;

  varLock->lock();
  while (!allocating) {
    //get the address
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    if (varPointers->find(lpml) != varPointers->end()) {
      atomicDataStatus *status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory);
      atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
      if (((oldVarStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
        //Something else already took care of it.  So this task won't manage it.
        varLock->unlock();
        return false;
      } else {
        //Attempt to claim we'll manage the ghost cells for this variable.
        //If the claim fails go back into our loop and recheck
        atomicDataStatus newVarStatus = oldVarStatus | AWAITING_GHOST_COPY;
        allocating = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
      }
    } else {
      varLock->unlock();
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapAwaitingGhostDataOnGPU( ) Variable %s not found.\n", label);
      exit(-1);
      return false;
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the GPU.
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapCopyingIntoGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  atomicDataStatus* status = nullptr;

  // get the status under the lock; the CAS loop below runs unlocked on the
  // atomic word itself.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  varLock->lock();
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    status = &(it->second.var->atomicStatusInGpuMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPU( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  varLock->unlock();

  bool copyingin = false;
  while (!copyingin) {
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (oldVarStatus == UNALLOCATED) {
      // Can't copy into memory that was never allocated.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPU( ) Variable %s is unallocated.\n", label);
      exit(-1);
    }
    if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
        ((oldVarStatus & VALID) == VALID) ||
        ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
      // Something else already took care of it.  So this task won't manage it.
      return false;
    } else {
      // Attempt to claim we'll manage the ghost cells for this variable.
      // If the claim fails go back into our loop and recheck
      atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
      copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the CPU.
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapCopyingIntoCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  atomicDataStatus* status = nullptr;

  // get the status
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  varLock->lock();
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  // NOTE(review): this repeats the map lookup already stored in 'it' above --
  // redundant but harmless.
  if (varPointers->find(lpml) != varPointers->end()) {
    status = &(it->second.var->atomicStatusInHostMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoCPU( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  varLock->unlock();

  bool copyingin = false;
  while (!copyingin) {
    // get the address
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
        ((oldVarStatus & VALID) == VALID) ||
        ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
      // Something else already took care of it.  So this task won't manage it.
      return false;
    } else {
      //Attempt to claim we'll manage the ghost cells for this variable.  If the claim fails go back into our loop and recheck
      atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
      // Once the host side starts a copy, its state is no longer "unknown".
      newVarStatus = newVarStatus & ~UNKNOWN;
      copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the GPU.
// returns true if we are the ones to manage this variable's ghost data.
// Staging-buffer variant of compareAndSwapCopyingIntoGPU(): claims the
// COPYING_IN bit on a specific staging buffer identified by (offset, size).
__host__ bool
GPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
  // NOTE(review): unlike the sibling functions this pointer is not initialized
  // to nullptr; all paths that reach the loop do assign it, so this is only a
  // consistency nit.
  atomicDataStatus* status;

  // get the status
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  varLock->lock();
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv);
    if (staging_it != it->second.var->stagingVars.end()) {
      status = &(staging_it->second.atomicStatusInGpuMemory);
    } else {
      varLock->unlock();
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Staging variable %s not found.\n", label);
      exit(-1);
      return false;
    }
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  varLock->unlock();

  bool copyingin = false;
  while (!copyingin) {
    //get the address
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (oldVarStatus == UNALLOCATED) {
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s is unallocated.\n", label);
      exit(-1);
    } else if ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
      // Staging buffers hold ghost data themselves and should never carry this flag.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s is marked as valid with ghosts, that should never happen with staging vars.\n", label);
      exit(-1);
    } else if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
               ((oldVarStatus & VALID) == VALID)) {
      //Something else already took care of it.  So this task won't manage it.
      return false;
    } else {
      //Attempt to claim we'll manage the ghost cells for this variable.
      //If the claim fails go back into our loop and recheck
      atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
      copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// Returns true if the named variable exists and its GPU status has the
// VALID_WITH_GHOSTS bit set.
__host__ bool
GPUDataWarehouse::isValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(it->second.var->atomicStatusInGpuMemory), 0) & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
//TODO: This needs to be turned into a compare and swap operation
// Marks the variable as valid-with-ghosts: ensures VALID is on, clears
// AWAITING_GHOST_COPY, then sets VALID_WITH_GHOSTS (three separate atomic ops,
// not one CAS -- hence the TODO above).
__host__ void
GPUDataWarehouse::setValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    //UNKNOWN
    //make sure the valid is still turned on
    __sync_or_and_fetch(&(it->second.var->atomicStatusInGpuMemory), VALID);
    //turn off AWAITING_GHOST_COPY
    __sync_and_and_fetch(&(it->second.var->atomicStatusInGpuMemory), ~AWAITING_GHOST_COPY);
    //turn on VALID_WITH_GHOSTS
    __sync_or_and_fetch(&(it->second.var->atomicStatusInGpuMemory), VALID_WITH_GHOSTS);
    varLock->unlock();
  } else {
    varLock->unlock();
    // NOTE(review): fatal exit with no diagnostic message -- the sibling
    // functions print an error before exiting; consider doing the same here.
    exit(-1);
  }
}

//______________________________________________________________________
// returns true if successful if marking a variable as a superpatch.  False otherwise.
// Can only turn an unallocated variable into a superpatch.
__host__ bool
GPUDataWarehouse::compareAndSwapFormASuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  // Attempts to atomically set the FORMING_SUPERPATCH flag on the named
  // variable's GPU status word.  Returns true only for the one caller that
  // wins the claim; returns false if another caller already formed (or is
  // forming) the superpatch.  A variable can only become a superpatch while it
  // is still unallocated -- any allocation/copy/valid/deallocating state is a
  // fatal error.  Fatal as well if the variable is not in this DW.
  bool compareAndSwapSucceeded = false;

  // Look up the variable's atomic status word under the lock; the CAS loop
  // below operates on the word itself without holding the lock.
  atomicDataStatus* status = nullptr;
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapFormASuperPatchGPU( ) Variable %s patch %d material %d levelIndx %d not found.\n", label, patchID, matlIndx, levelIndx);
    exit(-1);
    return false;
  }
  varLock->unlock();

  while (!compareAndSwapSucceeded) {
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::compareAndSwapFormASuperPatchGPU() - "
                  << " Attempting to set a superpatch flag for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " with status codes " << getDisplayableStatusCodes(oldVarStatus)
                  << std::endl;
      }
      cerrLock.unlock();
    }

    if ( (oldVarStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH
        || ((oldVarStatus & SUPERPATCH) == SUPERPATCH)) {
      //Something else already took care of it.  So this task won't manage it.
      return false;
    } else if (((oldVarStatus & ALLOCATING) == ALLOCATING)
               || ((oldVarStatus & ALLOCATED) == ALLOCATED)
               || ((oldVarStatus & COPYING_IN) == COPYING_IN)
               || ((oldVarStatus & VALID) == VALID)
               || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)
               || ((oldVarStatus & DEALLOCATING) == DEALLOCATING)) {
      // (Fix: the original chain tested the ALLOCATING bit twice; the
      // duplicate has been removed -- the set of rejected states is unchanged.)
      //Note, we DO allow a variable to be set as AWAITING_GHOST_COPY before anything else.
      //At the time of implementation this scenario shouldn't ever happen.  If so it means
      //someone is requesting to take a variable already in memory that's not a superpatch
      //and turn it into a superpatch.  It would require some kind of special deep copy mechanism.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapFormASuperPatchGPU( ) Variable %s cannot be turned into a superpatch, it's in use already with status %s.\n", label, getDisplayableStatusCodes(oldVarStatus).c_str());
      exit(-1);
      return false;
    } else {
      // Claim the flag; on CAS failure re-read the status and try again.
      atomicDataStatus newVarStatus = oldVarStatus | FORMING_SUPERPATCH;
      compareAndSwapSucceeded = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }

  atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::compareAndSwapFormASuperPatchGPU() - "
                << " Success for label " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " with status codes " << getDisplayableStatusCodes(oldVarStatus)
                << std::endl;
    }
    cerrLock.unlock();
  }
  return true;
}

//______________________________________________________________________
// Sets the allocated flag on a variables atomicDataStatus
// This is called after a forming a superpatch process completes.  *Only* the thread that got to set FORMING_SUPERPATCH can
// set SUPERPATCH.  Further, no other thread should modify the atomic status
//compareAndSwapFormASuperPatchGPU() should immediately call this.
__host__ bool
GPUDataWarehouse::compareAndSwapSetSuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  // Second phase of superpatch formation: swaps FORMING_SUPERPATCH for
  // SUPERPATCH.  Only the thread that won compareAndSwapFormASuperPatchGPU()
  // may call this; any interleaved modification is fatal.
  bool superpatched = false;

  //get the status
  atomicDataStatus* status = nullptr;
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Variable %s patch %d material %d levelIndx %d not found.\n", label, patchID, matlIndx, levelIndx);
    exit(-1);
    return false;
  }
  varLock->unlock();

  const atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
  if ((oldVarStatus & FORMING_SUPERPATCH) == 0) {
    //A sanity check
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Can't set a superpatch status if it wasn't previously marked as forming a superpatch.\n");
    exit(-1);
  } else {
    //Attempt to claim forming it into a superpatch.
    atomicDataStatus newVarStatus = oldVarStatus;
    newVarStatus = newVarStatus & ~FORMING_SUPERPATCH;
    newVarStatus = newVarStatus | SUPERPATCH;

    //If we succeeded in our attempt to claim to deallocate, this returns true.
    //If we failed, thats a real problem, and we crash below.
    //printf("current status is %s oldVarStatus is %s newVarStatus is %s\n", getDisplayableStatusCodes(status)
    superpatched = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
  }
  if (!superpatched) {
    //Another sanity check
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Something modified the atomic status between the phases of forming a superpatch and setting a superpatch. This shouldn't happen\n");
    exit(-1);
  }
  return superpatched;
}

//______________________________________________________________________
// Returns true if the named variable exists and its GPU status has the
// SUPERPATCH bit set.
__host__ bool
GPUDataWarehouse::isSuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  bool retVal = false;
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & SUPERPATCH) == SUPERPATCH);
  }
  varLock->unlock();
  return retVal;
}

//______________________________________________________________________
// Overwrites the variable's stored device offset/size with the superpatch's
// low point and size.  Fatal if the entry is not found.
__host__ void
GPUDataWarehouse::setSuperPatchLowAndSize(char const* const label, const int patchID, const int matlIndx, const int levelIndx,
                                          const int3& low, const int3& size){
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if ( it == varPointers->end()) {
    printf("ERROR: GPUDataWarehouse::setSuperPatchLowAndSize - Didn't find a variable for label %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx);
    varLock->unlock();
    exit(-1);
  }
  it->second.var->device_offset = low;
  it->second.var->device_size = size;
  varLock->unlock();
}

//______________________________________________________________________
// Device-side dump of every entry in d_varDB (one line per variable).
// Only block 0 / thread 0 prints; no-op when not compiled for the device.
__device__ void
GPUDataWarehouse::print()
{
#ifdef __CUDA_ARCH__
  __syncthreads();
  if( isThread0_Blk0() ){
    printf("\nVariables in GPUDataWarehouse\n");
    for (int i = 0; i < d_numVarDBItems; i++) {
      dataItem me = d_varDB[i];
      printf(" %-15s matl: %i, patchID: %i, L-%i, size:[%i,%i,%i] pointer: %p\n", me.label, me.matlIndx, me.domainID, me.levelIndx, me.var_size.x, me.var_size.y, me.var_size.z, me.var_ptr);
    }
    // NOTE(review): this __syncthreads() sits inside a single-thread branch,
    // which is a divergent barrier; presumably tolerated because it is a
    // debug-only path -- confirm.
    __syncthreads();
    printThread();
    printBlock();
    printf("\n");
  }
#endif
}

//______________________________________________________________________
// Prints a fatal error message (device or host build).  On the device the
// kernel is halted with a trap; on the host the process exits.
HOST_DEVICE void
GPUDataWarehouse::printError(const char* msg, const char* methodName, char const* label, const int patchID, int8_t matlIndx, int8_t levelIndx )
{
#ifdef __CUDA_ARCH__
  __syncthreads();
  if( isThread0() ){
    if (label[0] == '\0') {
      printf(" \nERROR GPU-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
    } else {
      printf(" \nERROR GPU-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
    }
    //Should this just loop through the variable database and print out only items with a
    //levelIndx value greater than zero? -- Brad
    //for (int i = 0; i < d_numLevelItems; i++) {
    //  printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
    // }
    // NOTE(review): divergent __syncthreads() inside a single-thread branch;
    // the kernel traps immediately below, so it likely never matters -- confirm.
    __syncthreads();
    printThread();
    printBlock();

    // we know this is fatal and why, so just stop kernel execution
    __threadfence();
    asm("trap;");
  }
#else
  //__________________________________
  //  CPU code
  if (label[0] == '\0') {
    printf(" \nERROR host-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
  } else {
    printf(" \nERROR host-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
  }
  exit(-1);
#endif
}

//______________________________________________________________________
// Prints an "unknown variable" error for a level-wide lookup and halts
// (device trap / host message only).
HOST_DEVICE void
GPUDataWarehouse::printGetLevelError(const char* msg, char const* label, int8_t levelIndx, int8_t matlIndx)
{
#ifdef __CUDA_ARCH__
  __syncthreads();
  if( isThread0() ){
    printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i)  unknown variable\n", msg, label, levelIndx, matlIndx);
    //Should this just loop through the variable database and print out only items with a
    //levelIndx value greater than zero? -- Brad
    // NOTE(review): divergent __syncthreads() inside a single-thread branch;
    // kernel traps right after -- confirm.
    __syncthreads();
    printThread();
    printBlock();

    // we know this is fatal and why, so just stop kernel execution
    __threadfence();
    asm("trap;");
  }
#else
  //__________________________________
  //  CPU code
  printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i)  unknown variable\n", msg, label, levelIndx, matlIndx);
#endif
}

//______________________________________________________________________
// Prints an "unknown variable" error for a patch-level lookup, dumps the
// available varDB entries, and halts (device trap / host message only).
HOST_DEVICE void
GPUDataWarehouse::printGetError(const char* msg, char const* label, int8_t levelIndx, const int patchID, int8_t matlIndx)
{
#ifdef __CUDA_ARCH__
  __syncthreads();
  if( isThread0() ) {
    printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i)  unknown variable\n", msg, label, levelIndx, patchID, matlIndx);
    for (int i = 0; i < d_numVarDBItems; i++) {
      printf(" Available varDB labels(%i of %i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", i, d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx, d_varDB[i].domainID, d_varDB[i].levelIndx);
    }
    // NOTE(review): divergent __syncthreads() inside a single-thread branch;
    // kernel traps right after -- confirm.
    __syncthreads();
    printThread();
    printBlock();
    printf("\n");

    // we know this is fatal and why, so just stop kernel execution
    __threadfence();
    asm("trap;");
  }
#else
  //__________________________________
  //  CPU code
  printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i)  unknown variable in DW %s\n", msg, label, levelIndx, patchID, matlIndx, _internalName);
  for (int i = 0; i < d_numVarDBItems; i++) {
    printf(" Available varDB labels(%i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx, d_varDB[i].domainID, d_varDB[i].levelIndx);
  }
#endif
}

//______________________________________________________________________
// Accessor for the host-side placement-new buffer backing this DW.
__host__ void*
GPUDataWarehouse::getPlacementNewBuffer()
{
  return placementNewBuffer;
}

//______________________________________________________________________
// Returns true if threadID and blockID are 0.
// Useful in conditional statements for limiting output.
//
//______________________________________________________________________
//  Returns true only for thread (0,0,0) of block (0,0,0) — i.e. exactly
//  one thread in the entire grid.  Useful for limiting debug output.
//
__device__ bool
GPUDataWarehouse::isThread0_Blk0()
{
  // Standard linearization of 3-D block and thread indices.
  int blockID  = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
  int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  return (blockID == 0 && threadID == 0);
}

//______________________________________________________________________
//  Returns true if this is thread (0,0,0) of its block (any block).
//  Useful in conditional statements for limiting output.
//
__device__ bool
GPUDataWarehouse::isThread0()
{
  // Consistency cleanup (behavior-preserving): the original summed the three
  // index components.  Since each component is non-negative, the sum is zero
  // exactly when the linearized ID is zero, so using the same linearization
  // as isThread0_Blk0() yields an identical result for the "== 0" test.
  int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  return (threadID == 0);
}

//______________________________________________________________________
//  Output the calling thread's 3-D index and its linearized thread ID.
//
__device__ void
GPUDataWarehouse::printThread()
{
  // Bug fix: this previously printed threadIdx.x + threadIdx.y + threadIdx.z
  // as the "ID", which is not a unique thread ID (e.g. threads (1,0,0) and
  // (0,1,0) both printed ID 1).  Use the standard linearization, matching
  // isThread0_Blk0() and printBlock().
  int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  printf( "Thread [%i,%i,%i], ID: %i\n", threadIdx.x, threadIdx.y, threadIdx.z, threadID);
}

//______________________________________________________________________
//  Output the calling thread's 3-D block index and its linearized block ID.
//
__device__ void
GPUDataWarehouse::printBlock()
{
  int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
  printf( "Block [%i,%i,%i], ID: %i\n", blockIdx.x, blockIdx.y, blockIdx.z, blockID);
}
499a73a38b8d17fb38cb996c80923f22dba01729.cu
/* * The MIT License * * Copyright (c) 1997-2018 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ /* GPU DataWarehouse device & host access*/ #include <CCA/Components/Schedulers/GPUDataWarehouse.h> #include <CCA/Components/Schedulers/GPUMemoryPool.h> #include <CCA/Components/Schedulers/SchedulerCommon.h> #include <CCA/Components/Schedulers/UnifiedScheduler.h> #include <Core/Grid/Variables/GPUVariable.h> #include <Core/Grid/Variables/GPUGridVariable.h> #include <Core/Grid/Variables/GPUReductionVariable.h> #include <Core/Grid/Variables/GPUPerPatch.h> #include <Core/Parallel/MasterLock.h> #include <Core/Parallel/Parallel.h> #include <Core/Parallel/ProcessorGroup.h> #include <Core/Util/DebugStream.h> #include <sci_defs/cuda_defs.h> #ifndef __CUDA_ARCH__ #include <string.h> #include <string> #endif #include <map> extern Uintah::MasterLock cerrLock; namespace Uintah { extern DebugStream gpu_stats; } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::get(const GPUGridVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ //device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setArray3(item->var_offset, item->var_size, item->var_ptr); } else { printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo vp = varPointers->at(lpml); var.setArray3(vp.var->device_offset, vp.var->device_size, vp.var->device_ptr); } else { printf("I'm GPUDW with name: \"%s\" at %p \n", _internalName, this); printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE bool GPUDataWarehouse::stagingVarExists(char const* label, 
int patchID, int matlIndx, int levelIndx, int3 offset, int3 size) { #ifdef __CUDA_ARCH__ // device code printError("This method not defined for the device.", "stagingVarExists", label, patchID, matlIndx, levelIndx); return false; #else // host code varLock->lock(); bool retval = false; labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { stagingVar sv; sv.device_offset = offset; sv.device_size = size; std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv); retval = (staging_it != it->second.var->stagingVars.end()); } varLock->unlock(); return retval; #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::getStagingVar(const GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size) { #ifdef __CUDA_ARCH__ // device code printError("This method not defined for the device.", "getStagingVar", label, patchID, matlIndx, levelIndx); #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { stagingVar sv; sv.device_offset = offset; sv.device_size = size; std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv); if (staging_it != it->second.var->stagingVars.end()) { var.setArray3(offset, size, staging_it->second.device_ptr); } else { printf("GPUDataWarehouse::getStagingVar() - Didn't find a staging variable from the device for label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d).", label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z); exit(-1); } } else { printError("Didn't find a staging variable from the device.", "getStagingVar", 
label, patchID, matlIndx, levelIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::getLevel(const GPUGridVariableBase& var, char const* label, int8_t matlIndx, int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code get(var, label, -99999999, matlIndx, levelIndx); #else // host code get(var, label, -99999999, matlIndx, levelIndx); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::get(const GPUReductionVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setData(item->var_ptr); } else { printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo vp = varPointers->at(lpml); var.setData(vp.var->device_ptr); } else { printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::get(const GPUPerPatchBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setData(item->var_ptr); } else { printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { 
allVarPointersInfo vp = varPointers->at(lpml); var.setData(vp.var->device_ptr); } else { printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setArray3(item->var_offset, item->var_size, item->var_ptr); } else { printGetError("GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr); } else { printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setData(item->var_ptr); } else { printGetError("GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo vp = varPointers->at(lpml); 
var.setData(vp.var->device_ptr); } else { printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { #ifdef __CUDA_ARCH__ // device code GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx); if (item) { var.setData(item->var_ptr); } else { printGetError("GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx); } #else // host code varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo vp = varPointers->at(lpml); var.setData(vp.var->device_ptr); } else { printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx); } varLock->unlock(); #endif } //______________________________________________________________________ //This method assumes the base patch in a superpatch region has already been allocated. //This is a shallow copy. It copies all datawarehouse metadata entries (except the status) //from that item into this patch's item in the GPU DW. __host__ void GPUDataWarehouse::copySuperPatchInfo(char const* label, int superPatchBaseID, int superPatchDestinationID, int matlIndx, int levelIndx) { if (superPatchBaseID == superPatchDestinationID) { //don't handle shallow copying itself return; } //Possible TODO: Add in offsets so the variable could be accessed in a non-superpatch manner. 
labelPatchMatlLevel lpml_source(label, superPatchBaseID, matlIndx, levelIndx); labelPatchMatlLevel lpml_dest(label, superPatchDestinationID, matlIndx, levelIndx); varLock->lock(); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator source_iter = varPointers->find(lpml_source); if (source_iter != varPointers->end()) { std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator dest_iter = varPointers->find(lpml_dest); if (dest_iter != varPointers->end()) { if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::copySuperPatchInfo() - " << " label " << label << " matl " << matlIndx << " level " << levelIndx << " Forming a superpatch by merging/shallowcopying metadata for patch " << superPatchDestinationID << " into patch " << superPatchBaseID << " with source status codes " << getDisplayableStatusCodes(source_iter->second.var->atomicStatusInGpuMemory) << " and dest status codes " << getDisplayableStatusCodes(dest_iter->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } //They now share the variable. The magic of this happens because the var is a C++ shared_ptr //TODO: They don't share the same offset. When offsets are added in, this should be updated //to manage offsets. 
dest_iter->second.var = source_iter->second.var; } else { printf("ERROR: GPUDataWarehouse::copySuperPatchInfo() - Didn't find a the destination ID at %d to copy into label %s patch %d matl %d level %d\n", superPatchDestinationID, label, superPatchDestinationID, matlIndx, levelIndx); varLock->unlock(); exit(-1); } } else { printf("ERROR: GPUDataWarehouse::copySuperPatchInfo() - Didn't find a base superPatch ID at %d to copy into label %s patch %d matl %d level %d\n", superPatchBaseID, label, superPatchDestinationID, matlIndx, levelIndx); varLock->unlock(); exit(-1); } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::put(GPUGridVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, GhostType gtype, int numGhostCells, void* host_ptr) { varLock->lock(); int3 var_offset; // offset int3 var_size; // dimensions of GPUGridVariable void* var_ptr; // raw pointer to the memory var.getArray3(var_offset, var_size, var_ptr); // See if it already exists. Also see if we need to update this into d_varDB. 
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml); std::map<stagingVar, stagingVarInfo>::iterator staging_it; //sanity checks if (iter == varPointers->end()) { printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n"); exit(-1); } else if (staging) { stagingVar sv; sv.device_offset = var_offset; sv.device_size = var_size; staging_it = iter->second.var->stagingVars.find(sv); if (staging_it == iter->second.var->stagingVars.end()) { printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without this staging var first existing in the internal database.\n"); exit(-1); } } if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::put( " << label << " ) - " << " Attempting to put a variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx; if (staging) { gpu_stats << " staging: true"; } else { gpu_stats << " staging: false"; } gpu_stats << " at device address " << var_ptr << " with status codes "; if (!staging) { gpu_stats << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory); } else { gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory); } gpu_stats << " datatype size " << sizeOfDataType << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << " current varPointers size is: " << varPointers->size() << " low (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ") " << std::endl; } cerrLock.unlock(); } if (staging == false) { iter->second.varDB_index = -1; iter->second.var->device_ptr = var_ptr; iter->second.var->device_offset = var_offset; iter->second.var->device_size = var_size; 
iter->second.var->sizeOfDataType = sizeOfDataType; iter->second.var->gtype = gtype; iter->second.var->numGhostCells = numGhostCells; iter->second.var->host_contiguousArrayPtr = host_ptr; iter->second.var->atomicStatusInHostMemory = UNKNOWN; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::put( " << label << " ) - " << " Put a regular non-staging variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " at device address " << var_ptr << " with datatype size " << iter->second.var->sizeOfDataType << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << " current varPointers size is: " << varPointers->size() << std::endl; } cerrLock.unlock(); } } else { // if (staging == true) staging_it->second.device_ptr = var_ptr; staging_it->second.host_contiguousArrayPtr = host_ptr; staging_it->second.varDB_index = -1; staging_it->second.atomicStatusInHostMemory = UNKNOWN; // Update the non-staging var's sizeOfDataType. The staging var uses this number. // It's possible that a staging var can exist and an empty placeholder non-staging var also exist, // if so, then then empty placeholder non-staging var won't have correct data type size. // So we grab it here. 
iter->second.var->sizeOfDataType = sizeOfDataType; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::put( " << label << " ) - " << " Put a staging variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " with offset (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ")" << " and size (" << var_size.x << ", " << var_size.y << ", " << var_size.z << ")" << " at device address " << var_ptr << " with datatype size " << iter->second.var->sizeOfDataType << " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } } varLock->unlock(); } //______________________________________________________________________ // This method puts an empty placeholder entry into the GPUDW database and marks it as unallocated __host__ void GPUDataWarehouse::putUnallocatedIfNotExists(char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 offset, int3 size) { // If it's a normal non-staging variable, check if doesn't exist. If so, add an "unallocated" entry. // If it's a staging variable, then still check if the non-staging part exists. A staging must exist within a non-staging variable. // A scenario where this can get a staging variable without a non-staging variable is receiving data from neighbor nodes. // For example, suppose node A has patch 0, and node B has patch 1, and A's patch 0 needs ghost cells from B's patch 1. Node A will // receive those ghost cells, but they will be marked as belonging to patch 1. Since A doesn't have the regular non-staging var // for patch 1, we make an empty placeholder for patch 1 so A can have a staging var to hold the ghost cell for patch 1. 
varLock->lock(); //Lock this entire section labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if ( it == varPointers->end()) { // Do not place size information. The Data Warehouse should not declare its current size until after the allocation is complete. // Further, no scheduler thread should attempt to determine an entry's size until the allocated flag has been marked as true. allVarPointersInfo vp; vp.varDB_index = -1; vp.var->device_ptr = nullptr; vp.var->atomicStatusInHostMemory = UNKNOWN; vp.var->atomicStatusInGpuMemory = UNALLOCATED; vp.var->host_contiguousArrayPtr = nullptr; vp.var->sizeOfDataType = 0; std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) ); if (!ret.second) { printf("ERROR:\nGPUDataWarehouse::putUnallocatedIfNotExists( ) Failure inserting into varPointers map.\n"); varLock->unlock(); exit(-1); } it = ret.first; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - " << " Put an unallocated non-staging variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } } if (staging) { std::map<stagingVar, stagingVarInfo>::iterator staging_it; stagingVar sv; sv.device_offset = offset; sv.device_size = size; staging_it = it->second.var->stagingVars.find(sv); if (staging_it == it->second.var->stagingVars.end()){ stagingVarInfo svi; svi.varDB_index = -1; svi.device_ptr = nullptr; svi.host_contiguousArrayPtr = nullptr; svi.atomicStatusInHostMemory = UNKNOWN; svi.atomicStatusInGpuMemory = 
UNALLOCATED; std::pair<stagingVar, stagingVarInfo> p = std::make_pair( sv, svi ); it->second.var->stagingVars.insert( p ); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - " << " Put an unallocated staging variable in the host-side varPointers map for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")" << " size (" << size.x << ", " << size.y << ", " << size.z << ")" << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } } } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::allocateAndPut(GPUGridVariableBase &var, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GhostType gtype, int numGhostCells) { // Allocate space on the GPU and declare a variable onto the GPU. // Check if it exists prior to allocating memory for it. // If it has already been allocated, just use that. // If it hasn't, this is lock free and the first thread to request allocating gets to allocate // If another thread sees that allocating is in process, it loops and waits until the allocation complete. 
  bool allocationNeeded = false;
  // Cell extents and origin of the requested region.
  int3 size = make_int3(high.x-low.x, high.y-low.y, high.z-low.z);
  int3 offset = low;
  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " Calling putUnallocatedIfNotExists() for " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " staging: " << std::boolalpha << staging
                << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName << std::endl;
    }
    cerrLock.unlock();
  }
  // This variable may not yet exist.  But we want to declare we're allocating it.  So ensure there is an entry.
  putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, staging, offset, size);

  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  // NOTE(review): `it` is dereferenced below without checking against
  // varPointers->end().  putUnallocatedIfNotExists() should guarantee the entry
  // exists, but a defensive check here would be cheap — TODO confirm.
  std::map<stagingVar, stagingVarInfo>::iterator staging_it;
  if (staging) {
    stagingVar sv;
    sv.device_offset = offset;
    sv.device_size = size;
    staging_it = it->second.var->stagingVars.find(sv);
  }
  varLock->unlock();

  // Locking not needed from here on in this method.  STL maps ensure that iterators point to correct values
  // even if other threads add nodes.  We just can't remove values, but that shouldn't ever happen.

  // This prepares the var with the offset and size.  Any possible allocation will come later.
  // If it needs to go into the database, that will also come later.
  void* addr = nullptr;
  var.setArray3(offset, size, addr);

  // Now see if we allocate the variable or use a previous existing allocation.
  if (staging == false) {

    // See if someone has stated they are allocating it.  The winner of this CAS
    // becomes responsible for the actual cudaMalloc/pool allocation below.
    allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory);
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                  << " allocationNeeded is " << std::boolalpha << allocationNeeded
                  << " for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << std::endl;
      }
      cerrLock.unlock();
    }
    if (!allocationNeeded) {
      // Someone else is allocating it or it has already been allocated.  Wait until they are done.
      // NOTE(review): busy-wait spin with no backoff/yield — acceptable if waits
      // are short, but worth confirming under contention.
      bool allocated = false;
      while (!allocated) {
        allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory);
      }
      // Sanity check to ensure we have correct size information.
      varLock->lock();
      it = varPointers->find(lpml);
      varLock->unlock();
      if (it->second.var->device_offset.x == low.x
          && it->second.var->device_offset.y == low.y
          && it->second.var->device_offset.z == low.z
          && it->second.var->device_size.x == size.x
          && it->second.var->device_size.y == size.y
          && it->second.var->device_size.z == size.z) {
        // Space for this var already exists.  Use that and return.
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This non-staging/regular variable already exists. No need to allocate another. GPUDW has a variable for label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << it->second.var->device_ptr
                      << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec << std::endl;
          }
          cerrLock.unlock();
        }
        // Have this var use the existing memory address.
        var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr);
      } else if (it->second.var->device_offset.x <= low.x
                 && it->second.var->device_offset.y <= low.y
                 && it->second.var->device_offset.z <= low.z
                 && it->second.var->device_size.x >= size.x
                 && it->second.var->device_size.y >= size.y
                 && it->second.var->device_size.z >= size.z) {
        // It fits inside.  Just use it.
        // NOTE(review): this test compares lower corners and raw sizes, not
        // upper extents (existing_offset + existing_size vs. low + size).  An
        // existing region can satisfy both comparisons yet not actually contain
        // the requested region — TODO confirm whether extents should be checked.
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This non-staging/regular variable fits inside another variable that already exists. No need to allocate another. GPUDW has a variable for label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << it->second.var->device_ptr
                      << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec << std::endl;
          }
          cerrLock.unlock();
        }
        var.setArray3(it->second.var->device_offset, it->second.var->device_size, it->second.var->device_ptr);
      } else {
        printf("ERROR:\nGPUDataWarehouse::allocateAndPut( %s ) Variable in database but of the wrong size. This shouldn't ever happen. This needs low (%d, %d, %d) and size (%d, %d, %d), but in the database it is low (%d, %d, %d) and size (%d, %d, %d)\n",
               label, low.x, low.y, low.z, size.x, size.y, size.z,
               it->second.var->device_offset.x, it->second.var->device_offset.y, it->second.var->device_offset.z,
               it->second.var->device_size.x, it->second.var->device_size.y, it->second.var->device_size.z);
        exit(-1);
      }
    }
  } else {
    // It's a staging variable.
    if (staging_it != it->second.var->stagingVars.end()) {
      // This variable exists in the database, no need to "put" it in again.
      // See if someone has stated they are allocating it.
      allocationNeeded = compareAndSwapAllocating(staging_it->second.atomicStatusInGpuMemory);
      if (!allocationNeeded) {
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                      << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                      << " This staging variable already exists. No need to allocate another. For label " << label
                      << " patch " << patchID
                      << " matl " << matlIndx
                      << " level " << levelIndx
                      << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                      << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                      << " on device " << d_device_id
                      << " with data pointer " << staging_it->second.device_ptr
                      << " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
                      << " into GPUDW at " << std::hex << this << std::dec << std::endl;
          }
          cerrLock.unlock();
        }
        // We need the pointer.  We can't move on until we get the pointer.
        // Ensure that it has been allocated (just not allocating).  Another thread may have been assigned to allocate it
        // but not completed that action.  If that's the case, wait until it's done so we can get the pointer.
        bool allocated = false;
        while (!allocated) {
          allocated = checkAllocated(staging_it->second.atomicStatusInGpuMemory);
        }
        // Have this var use the existing memory address.
        var.setArray3(offset, size, staging_it->second.device_ptr);
      }
    }
  }

  // Now allocate it — only the thread that won the compareAndSwapAllocating() race gets here.
  if (allocationNeeded) {

    OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);

    unsigned int memSize = var.getMemSize();

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
                  << " for " << label
                  << " patch " << patchID
                  << " material " << matlIndx
                  << " level " << levelIndx
                  << " staging: " << std::boolalpha << staging
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " at " << addr
                  << " with status codes ";
        if (!staging) {
          gpu_stats << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory);
        } else {
          gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
        }
        gpu_stats << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec << std::endl;
      }
      cerrLock.unlock();
    }

    addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);

    // Also update the var object itself.
    var.setArray3(offset, size, addr);

    // Put all remaining information about the variable into the the database.
    put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, gtype, numGhostCells);

    // Now that we have the pointer and that it has been inserted into the database,
    // update the status from allocating to allocated.  Waiters spinning on
    // checkAllocated() above are released by this transition.
    if (!staging) {
      compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory);
    } else {
      compareAndSwapAllocate(staging_it->second.atomicStatusInGpuMemory);
    }
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut(), complete"
                  << " for " << label
                  << " patch " << patchID
                  << " material " << matlIndx
                  << " level " << levelIndx
                  << " staging: " << std::boolalpha << staging
                  << " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
                  << " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
                  << " at " << addr
                  << " with status codes ";
        if (!staging) {
          gpu_stats << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory);
        } else {
          gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
        }
        gpu_stats << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec << std::endl;
      }
      cerrLock.unlock();
    }
  }
}

//______________________________________________________________________
// This method is meant to take an entry from the host side DW and copy it into
// the task datawarehouse whose job is to eventually live GPU side.
// Copy one variable's metadata (non-staging or staging) from the host-side
// GPUDW into this task DW's varPointers map and its d_varDB array, assigning
// it the next free d_varDB slot.  Marks d_dirty so the d_varDB gets re-copied
// to the device.  Must only be called on a task DW (d_device_copy != nullptr).
__host__ void
GPUDataWarehouse::copyItemIntoTaskDW(GPUDataWarehouse *hostSideGPUDW, char const* label,
                                     int patchID, int matlIndx, int levelIndx,
                                     bool staging, int3 offset, int3 size)
{
  if (d_device_copy == nullptr) {
    // sanity check
    printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This method should only be called from a task data warehouse.\n");
    exit(-1);
  }

  varLock->lock();
  if (d_numVarDBItems==MAX_VARDB_ITEMS) {
    printf("ERROR: Out of GPUDataWarehouse space");
    varLock->unlock();
    exit(-1);
  }
  varLock->unlock();

  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  stagingVar sv;
  sv.device_offset = offset;
  sv.device_size = size;

  // Get the iterator(s) from the host side GPUDW.
  hostSideGPUDW->varLock->lock();

  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator hostSideGPUDW_iter = hostSideGPUDW->varPointers->find(lpml);
  std::map<stagingVar, stagingVarInfo>::iterator hostSideGPUDW_staging_iter;
  if (staging) {
    hostSideGPUDW_staging_iter = hostSideGPUDW_iter->second.var->stagingVars.find(sv);
    if (hostSideGPUDW_staging_iter == hostSideGPUDW_iter->second.var->stagingVars.end()) {
      printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - No staging var was found for for %s patch %d material %d level %d offset (%d, %d, %d) size (%d, %d, %d) in the DW located at %p\n",
             label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z, hostSideGPUDW);
      // NOTE(review): this unlocks *this* DW's varLock, which is not held here;
      // the mutex actually held is hostSideGPUDW->varLock.  Harmless only
      // because exit(-1) follows — looks like it should be
      // hostSideGPUDW->varLock->unlock().  TODO confirm.
      varLock->unlock();
      exit(-1);
    }
  }

  hostSideGPUDW->varLock->unlock();

  varLock->lock();

  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
  // sanity check
  if (iter != varPointers->end() && !staging) {
    printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This task datawarehouse already had an entry for %s patch %d material %d level %d\n",
           label, patchID, matlIndx, levelIndx);
    varLock->unlock();
    exit(-1);
  }

  // If it's staging, there should already be a non-staging var in the host-side GPUDW (even if it's just a placeholder).
  // Inserting into this task DW, it is a requirement that non-staging variables get inserted first
  // then any staging variables can come in later.  This won't handle any scenario where a staging variable is requested
  // into the task DW without a non-staging variable already existing here.

  // TODO: Replace with an atomic counter.
  // (Safe today only because this increment happens while varLock is held.)
  int d_varDB_index=d_numVarDBItems;
  d_numVarDBItems++;
  int i = d_varDB_index;

  if (!staging) {
    // Create a new allVarPointersInfo object, copying over the offset.
    allVarPointersInfo vp;
    vp.device_offset = hostSideGPUDW_iter->second.device_offset;

    // Give it a d_varDB index.
    vp.varDB_index = d_varDB_index;

    // Insert it in.
    varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );

    strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
    d_varDB[i].domainID = patchID;
    d_varDB[i].matlIndx = matlIndx;
    d_varDB[i].levelIndx  = levelIndx;
    d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.var->sizeOfDataType;
    d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.var->gtype;
    d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.var->numGhostCells;
    d_varDB[i].varItem.staging = staging;
    d_varDB[i].ghostItem.dest_varDB_index = -1; // Signify that this d_varDB item is NOT meta data to copy a ghost cell.
    d_varDB[i].var_offset = hostSideGPUDW_iter->second.var->device_offset;
    d_varDB[i].var_size = hostSideGPUDW_iter->second.var->device_size;
    d_varDB[i].var_ptr = hostSideGPUDW_iter->second.var->device_ptr;

  } else {

    if (iter == varPointers->end()) {
      // A staging item was requested but there's no regular variable for it to piggy back in.
      // So create an empty placeholder regular variable.

      // Create a new allVarPointersInfo object, copying over the offset.
      allVarPointersInfo vp;
      vp.device_offset = hostSideGPUDW_iter->second.device_offset;

      // Empty placeholders won't be placed in the d_varDB array.
      vp.varDB_index = -1;

      // Insert it in.
      std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret
          = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
      if (!ret.second) {
        printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) Failure inserting into varPointers map.\n");
        varLock->unlock();
        exit(-1);
      }
      iter = ret.first;
    }

    // Copy the item.
    stagingVarInfo svi = hostSideGPUDW_staging_iter->second;

    // Give it a d_varDB index.
    svi.varDB_index = d_varDB_index;

    // Insert it in.
    std::map<stagingVar, stagingVarInfo>::iterator staging_iter = iter->second.var->stagingVars.find(sv);
    if (staging_iter != iter->second.var->stagingVars.end()) {
      // NOTE(review): this is a warning only — execution continues and the
      // insert below will be a no-op on the existing key.  TODO confirm intended.
      printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) This staging var already exists in this task DW\n");
    }
    std::pair<stagingVar, stagingVarInfo> p = std::make_pair( sv, svi );
    iter->second.var->stagingVars.insert( p );

    strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
    d_varDB[i].domainID = patchID;
    d_varDB[i].matlIndx = matlIndx;
    d_varDB[i].levelIndx  = levelIndx;
    d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.var->sizeOfDataType;
    d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.var->gtype;
    d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.var->numGhostCells;
    d_varDB[i].varItem.staging = staging;
    d_varDB[i].ghostItem.dest_varDB_index = -1; // Signify that this d_varDB item is NOT meta data to copy a ghost cell.
    // For staging vars the region geometry comes from the staging key itself.
    d_varDB[i].var_offset = hostSideGPUDW_staging_iter->first.device_offset;
    d_varDB[i].var_size = hostSideGPUDW_staging_iter->first.device_size;
    d_varDB[i].var_ptr = hostSideGPUDW_staging_iter->second.device_ptr;
  }

  d_dirty=true; // The device-side copy of d_varDB is now stale.

  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::copyItemIntoTaskDW( " << label << " ) - "
                << " Put into d_varDB at index " << i
                << " of max index " << d_maxdVarDBItems - 1
                << " label " << label
                << " patch " << d_varDB[i].domainID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " staging: " << std::boolalpha << staging
                << " datatype size " <<d_varDB[i].sizeOfDataType
                << " into address " << d_varDB[i].var_ptr
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " size [" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << "]"
                << " offset [" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << "]"
                << std::endl;
    }
    cerrLock.unlock();
  }

  varLock->unlock();
}

//______________________________________________________________________
//
// NOTE(review): the entire body of putContiguous() is commented out (dead
// code retained for reference); the function is currently a no-op.
__host__ void
GPUDataWarehouse::putContiguous(GPUGridVariableBase &var, const char* indexID, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GridVariableBase* gridVar, bool stageOnHost)
{
/*
#ifdef __CUDA_ARCH__
  //Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else

  varLock->lock();

  //first check if this patch/var/matl is in the process of loading in.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    //Space for this patch already exists.  Use that and return.
    if (d_debug){
      printf("GPUDataWarehouse::putContiguous( %s ).  This gpudw database has a variable for label %s patch %d matl %d level %d staging %s on device %d.  Reusing it.\n",
             label, label, patchID, matlIndx, levelIndx, staging ? "true" : "false", d_device_id);
    }
    var.setArray3(varPointers->at(lpml).device_offset, varPointers->at(lpml).device_size, varPointers->at(lpml).device_ptr);
    varLock->unlock();
    return;
  }

  int3 size=make_int3(high.x-low.x, high.y-low.y, high.z-low.z);
  int3 offset=low;
  void* device_ptr=nullptr;
  var.setArray3(offset, size, device_ptr);

  allocateLock->lock();
  contiguousArrayInfo *ca = &(contiguousArrays->at(indexID));
  allocateLock->unlock();

  if ( (ca->allocatedDeviceMemory == nullptr
       || ca->sizeOfAllocatedMemory - ca->assignedOffset < var.getMemSize())
       && stageOnHost) {
    printf("ERROR: No room left on device to be assigned address space\n");
    if (ca->allocatedDeviceMemory != nullptr) {
      printf("There was %lu bytes allocated, %lu has been assigned, and %lu more bytes were attempted to be assigned for %s patch %d matl %d level %d staging %s\n",
             ca->sizeOfAllocatedMemory, ca->assignedOffset, var.getMemSize(),
             label, patchID, matlIndx, levelIndx, staging ? "true" : "false");
    }
    varLock->unlock();
    exit(-1);
  } else {
    //There is already pre-allocated contiguous memory chunks with room available on
    //both the device and the host.  Just assign pointers for both the device and host contiguous arrays.

    //This prepares the var with the offset and size.  The actual address will come next.
    void* host_contiguousArrayPtr = nullptr;

    int varMemSize = var.getMemSize();

    device_ptr = (void*)((uint8_t*)ca->allocatedDeviceMemory + ca->assignedOffset);
    var.setArray3(offset, size, device_ptr);
    host_contiguousArrayPtr = (void*)((uint8_t*)ca->allocatedHostMemory + ca->assignedOffset);

    //We ran into cuda misaligned errors previously when mixing different data types.  We suspect the ints at 4 bytes
    //were the issue.  So the engine previously computes buffer room for each variable as a multiple of UnifiedScheduler::bufferPadding.
    //So the contiguous array has been sized with extra padding.  (For example, if a var holds 12 ints, then it would be 48 bytes in
    //size.  But if UnifiedScheduler::bufferPadding = 32, then it should add 16 bytes for padding, for a total of 64 bytes).
    int memSizePlusPadding = ((UnifiedScheduler::bufferPadding - varMemSize % UnifiedScheduler::bufferPadding) % UnifiedScheduler::bufferPadding) + varMemSize;
    ca->assignedOffset += memSizePlusPadding;

    if (stageOnHost) {
      //Some GPU grid variable data doesn't need to be copied from the host
      //For example, computes vars are just uninitialized space.
      //Others grid vars need to be copied.  This copies the data into a contiguous
      //array on the host so that copyDataHostToDevice() can copy the contiguous
      //host array to the device.

      //Data listed as required.  Or compute data that was initialized as a copy of something else.
      ca->copiedOffset += memSizePlusPadding;

      memcpy(host_contiguousArrayPtr, gridVar->getBasePointer(), varMemSize);
    }
    varLock->unlock();

    put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, None, 0, host_contiguousArrayPtr);
  }
#endif
*/
}

//______________________________________________________________________
//
// NOTE(review): the entire body of allocate() is commented out (dead code
// retained for reference); the function is currently a no-op.
__host__ void
GPUDataWarehouse::allocate(const char* indexID, size_t size)
{
/*
#ifdef __CUDA_ARCH__
  // Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else
  if (size == 0) {
    return;
  }

  //This method allocates one big chunk of memory so that little allocations do not have to occur for each grid variable.
  //This is needed because devices often have substantial overhead for each device malloc and device copy.  By putting it into one
  //chunk of memory, only one malloc and one copy to device should be needed.
  double *d_ptr = nullptr;
  double *h_ptr = nullptr;
  OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);

  printf("Allocated GPU buffer of size %lu \n", (unsigned long)size);

  CUDA_RT_SAFE_CALL(cudaMalloc(&d_ptr, size) );
  //printf("In allocate(), cuda malloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);

  if (d_debug) {
    printf("In allocate(), cudaMalloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);
  }

  //Now allocate that much also on the host.  We want to do this because it's easier to pool up all the data on the host side
  //and then move it over to the device side later in one shot.  It also allows for one copy doing a device to host later.
  //h_ptr = new double[size];
  h_ptr = (double*)malloc(size);

  //Registering memory seems good in theory, but bad in practice for our purposes.
  //On the k20 device on beast.sci.utah.edu, this single register call was taking 0.1 seconds!
  //On my home GTX580 device, it was taking 0.015 seconds, better, but still substantial enough
  //we should avoid it for now.  (If you want to use it, then also uncomment the cudaHostUnregister call in clear()).
  //cudaHostRegister(h_ptr, size, cudaHostRegisterPortable);

  contiguousArrayInfo ca(d_ptr, h_ptr, size);
  allocateLock->lock();
  contiguousArrays->insert( std::map<const char *, contiguousArrayInfo>::value_type( indexID, ca ) );
  //for (std::map<std::string, contiguousArrayInfo>::iterator it = contiguousArrays->begin(); it != contiguousArrays->end(); ++it)
  //  printf("%s\n", it->first.c_str());
  allocateLock->unlock();
#endif
*/
}

//______________________________________________________________________
//
// NOTE(review): the entire body of copyHostContiguousToHost() is commented
// out (dead code retained for reference); the function is currently a no-op.
__host__ void
GPUDataWarehouse::copyHostContiguousToHost(GPUGridVariableBase& device_var, GridVariableBase* host_var, char const* label, int patchID, int matlIndx, int levelIndx)
{
/*
#ifdef __CUDA_ARCH__
  //Should not called from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else
  //see if this datawarehouse has anything for this patchGroupID.
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    allVarPointersInfo info = varPointers->at(lpml);

    device_var.setArray3(varPointers->at(lpml).device_offset, varPointers->at(lpml).device_offset, info.device_ptr);
    varLock->unlock();

    // size_t size = device_var.getMemSize();

    //TODO: Instead of doing a memcpy, I bet the original host grid variable could just have its pointers updated
    //to work with what we were sent back.  This would take some considerable work though to get all the details right
    //TODO: This needs to be a memcpy async
    memcpy(host_var->getBasePointer(), info.host_contiguousArrayPtr, device_var.getMemSize());

    //Since we've moved it back into the host, lets mark it as being used.
    //It's possible in the future there could be a scenario where we want to bring it
    //back to the host but still retain it in the GPU.  One scenario is
    //sending data to an output .ups file but not modifying it on the host.
    remove(label, patchID, matlIndx, levelIndx);

  } else {
    varLock->unlock();
    printf("ERROR: host copyHostContiguoustoHost unknown variable on GPUDataWarehouse");
    //for (std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it=varPointers->begin(); it!=varPointers->end(); ++it)
    //  printf("%s %d %d => %d \n", it->first.label, it->first.patchID, it->first.matlIndx, it->second.varDB_index);
    exit(-1);
  }
#endif
*/
}

//______________________________________________________________________
//
// Record an already-allocated reduction variable's device pointer and metadata
// into the host-side varPointers map.  The entry must already exist (created
// by putUnallocatedIfNotExists / allocateAndPut); this only fills it in.
// NOTE(review): the error path below exits while varLock is still held —
// moot because of exit(-1), but inconsistent with sibling methods.
__host__ void
GPUDataWarehouse::put(GPUReductionVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
  varLock->lock();

  void* var_ptr;           // raw pointer to the memory
  var.getData(var_ptr);

  // See if it already exists.  Also see if we need to update this into d_varDB.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);

  // sanity check
  if (iter == varPointers->end()) {
    printf("ERROR:\nGPUDataWarehouse::put( )  Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
    exit(-1);
  }

  iter->second.varDB_index = -1;
  iter->second.var->device_ptr = var_ptr;
  iter->second.var->sizeOfDataType = sizeOfDataType;
  iter->second.var->gtype = None;            // reduction vars carry no ghost cells
  iter->second.var->numGhostCells = 0;
  iter->second.var->host_contiguousArrayPtr = host_ptr;
  iter->second.var->atomicStatusInHostMemory = UNKNOWN;
  int3 zeroValue;
  zeroValue.x = 0;
  zeroValue.y = 0;
  zeroValue.z = 0;
  iter->second.var->device_offset = zeroValue;  // reduction vars have no spatial extent
  iter->second.var->device_size = zeroValue;

  // previously set, do not set here
  // iter->second.var->atomicStatusInGpuMemory =

  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::put( " << label << " ) - "
                << " Put a reduction variable in the host-side varPointers map for label " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " at device address " << var_ptr
                << " with datatype size " << iter->second.var->sizeOfDataType
                << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory)
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName
                << " current varPointers size is: " << varPointers->size()
                << std::endl;
    }
    cerrLock.unlock();
  }

  varLock->unlock();
}

//______________________________________________________________________
//
// Same as the reduction-variable put() above, but for a per-patch variable.
// NOTE(review): error path also exits with varLock held (moot via exit(-1)).
__host__ void
GPUDataWarehouse::put(GPUPerPatchBase& var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
  varLock->lock();

  void* var_ptr;           // raw pointer to the memory
  var.getData(var_ptr);

  // See if it already exists.  Also see if we need to update this into d_varDB.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);

  // sanity check
  if (iter == varPointers->end()) {
    printf("ERROR:\nGPUDataWarehouse::put( )  Can't use put() for a host-side GPU DW without it first existing in the internal database for %s patch %d matl %d.\n", label, patchID, matlIndx);
    exit(-1);
  }

  iter->second.varDB_index = -1;
  iter->second.var->device_ptr = var_ptr;
  iter->second.var->sizeOfDataType = sizeOfDataType;
  iter->second.var->gtype = None;            // per-patch vars carry no ghost cells
  iter->second.var->numGhostCells = 0;
  iter->second.var->host_contiguousArrayPtr = host_ptr;
  iter->second.var->atomicStatusInHostMemory = UNKNOWN;
  int3 zeroValue;
  zeroValue.x = 0;
  zeroValue.y = 0;
  zeroValue.z = 0;
  iter->second.var->device_offset = zeroValue;
  iter->second.var->device_size = zeroValue;

  // previously set, do not set here
  // iter->second.atomicStatusInGputMemory =

  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::put( " << label << " ) - "
                << " Put a patch variable in the host-side varPointers map for label " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " at device address " << var_ptr
                << " with datatype size " << iter->second.var->sizeOfDataType
                << " with status codes " << getDisplayableStatusCodes(iter->second.var->atomicStatusInGpuMemory)
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName
                << " current varPointers size is: " << varPointers->size()
                << std::endl;
    }
    cerrLock.unlock();
  }

  varLock->unlock();
}

//______________________________________________________________________
//
// Allocate space on the GPU and declare a reduction variable onto the GPU.
// This method does NOT stage everything in a big array.
// Check if it exists prior to allocating memory for it.
// If it has already been allocated, just use that.
// If it hasn't, this is lock free and the first thread to request allocating gets to allocate.
// If another thread sees that allocating is in process, it loops and waits until the allocation completes.
__host__ void
GPUDataWarehouse::allocateAndPut(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
  bool allocationNeeded = false;
  // Reduction variables have no spatial extent — zero size/offset placeholder.
  int3 size = make_int3(0,0,0);
  int3 offset = make_int3(0,0,0);
  if (gpu_stats.active()) {
    cerrLock.lock();
    {
      gpu_stats << UnifiedScheduler::myRankThread()
                << " Calling putUnallocatedIfNotExists() for " << label
                << " patch " << patchID
                << " matl " << matlIndx
                << " level " << levelIndx
                << " on device " << d_device_id
                << " into GPUDW at " << std::hex << this << std::dec
                << " with description " << _internalName << std::endl;
    }
    cerrLock.unlock();
  }
  // This variable may not yet exist.  But we want to declare we're allocating it.  So ensure there is an entry.
  putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size);

  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  // NOTE(review): `it` is dereferenced below without an end() check — relies on
  // putUnallocatedIfNotExists() having inserted the entry.  TODO confirm.
  varLock->unlock();

  void* addr = nullptr;

  // Now see if we allocate the variable or use a previous existing allocation.

  // See if someone has stated they are allocating it — CAS winner allocates below.
  allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory);
  if (!allocationNeeded) {
    // Someone else is allocating it or it has already been allocated.
    // Space for this var already exists.  Use that and return.
    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
                  << " This reduction variable already exists. No need to allocate another.  GPUDW has a variable for label " << label
                  << " patch " << patchID
                  << " matl " << matlIndx
                  << " level " << levelIndx
                  << " on device " << d_device_id
                  << " with data pointer " << it->second.var->device_ptr
                  << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                  << " into GPUDW at " << std::hex << this << std::dec << std::endl;
      }
      cerrLock.unlock();
    }
    // We need the pointer.  We can't move on until we get the pointer.
    // Ensure that it has been allocated (just not allocating).  Another thread may have been assigned to allocate it
    // but not completed that action.  If that's the case, wait until it's done so we can get the pointer.
    // (Re-reading device_ptr inside the spin so we pick it up as soon as it's published.)
    bool allocated = false;
    while (!allocated) {
      allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory);
      addr = it->second.var->device_ptr;
    }
    // Have this var use the existing memory address.
    var.setData(addr);
  } else {
    // We are the first task to request allocation.  Do it.
    OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
    size_t memSize = var.getMemSize();

    if (gpu_stats.active()) {
      cerrLock.lock();
      {
        gpu_stats << UnifiedScheduler::myRankThread()
                  << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
                  << " for reduction variable " << label
                  << " patch " << patchID
                  << " material " << matlIndx
                  << " level " << levelIndx
                  << " size " << var.getMemSize()
                  << " at " << addr
                  << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory)
                  << " on device " << d_device_id
                  << " into GPUDW at " << std::hex << this << std::dec << std::endl;
      }
      cerrLock.unlock();
    }

    addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);

    // Also update the var object itself.
    var.setData(addr);

    // Put all remaining information about the variable into the the database.
    put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx);

    // Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated.
    compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory);
  }
}

//______________________________________________________________________
//
// Allocate space on the GPU and declare a per-patch variable onto the GPU.
// This method does NOT stage everything in a big array.
// Check if it exists prior to allocating memory for it.
// If it has already been allocated, just use that.
// If it hasn't, this is lock free and the first thread to request allocating gets to allocate.
// If another thread sees that allocating is in process, it loops and waits until the allocation completes.
__host__ void
GPUDataWarehouse::allocateAndPut(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
bool allocationNeeded = false; int3 size = make_int3(0,0,0); int3 offset = make_int3(0,0,0); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } //This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry. putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size); varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); varLock->unlock(); void* addr = nullptr; //Now see if we allocate the variable or use a previous existing allocation. //See if someone has stated they are allocating it allocationNeeded = compareAndSwapAllocating(it->second.var->atomicStatusInGpuMemory); if (!allocationNeeded) { //Someone else is allocating it or it has already been allocated. //Space for this var already exists. Use that and return. if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut( " << label << " ) - " << " This patch variable already exists. No need to allocate another. GPUDW has a variable for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " on device " << d_device_id << " with data pointer " << it->second.var->device_ptr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } //We need the pointer. We can't move on until we get the pointer. //Ensure that it has been allocated (just not allocating). 
Another thread may have been assigned to allocate it //but not completed that action. If that's the case, wait until it's done so we can get the pointer. bool allocated = false; while (!allocated) { allocated = checkAllocated(it->second.var->atomicStatusInGpuMemory); addr = it->second.var->device_ptr; } //Have this var use the existing memory address. var.setData(addr); } else { //We are the first task to request allocation. Do it. OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id); size_t memSize = var.getMemSize(); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool" << " for PerPatch variable " << label << " patch " << patchID << " material " << matlIndx << " level " << levelIndx << " size " << var.getMemSize() << " at " << addr << " with status codes " << getDisplayableStatusCodes(it->second.var->atomicStatusInGpuMemory) << " on device " << d_device_id << " into GPUDW at " << std::hex << this << std::dec << std::endl; } cerrLock.unlock(); } addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize); //Also update the var object itself var.setData(addr); //Put all remaining information about the variable into the the database. put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx); //Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated compareAndSwapAllocate(it->second.var->atomicStatusInGpuMemory); } } //______________________________________________________________________ // __device__ GPUDataWarehouse::dataItem* GPUDataWarehouse::getItem(char const* label, const int patchID, const int8_t matlIndx, const int8_t levelIndx) { //This upcoming __syncthreads is needed. With CUDA function calls are inlined. 
// If you don't have it this upcoming __syncthreads here's what I think can happen: // * The correct index was found by one of the threads. // * The last __syncthreads is called, all threads met up there. // * Some threads in the block then make a second "function" call and reset index to -1 // * Meanwhile, those other threads were still in the first "function" call and hadn't // yet processed if (index == -1). They now run that line. And see index is now -1. That's bad. // So to prevent this scenario, we have one more __syncthreads listed immediately below. __syncthreads(); //sync before get short numThreads = blockDim.x * blockDim.y * blockDim.z; //int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid int i = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block //int threadID = i; //if (d_debug && threadID == 0 && blockID == 0) { // printf("device getting item \"%s\" from GPUDW %p", label, this); // printf("size (%d vars)\n Available labels:", d_numVarDBItems); //} //Have every thread try to find the label/patchId/matlIndx is a match in //array. This is a parallel approach so that instead of doing a simple //sequential search with one thread, we can let every thread search for it. Only the //winning thread gets to write to shared data. __shared__ int index; index = -1; __syncthreads(); //sync before get, making sure everyone set index to -1 while(i<d_numVarDBItems){ short strmatch=0; char const *s1 = label; //reset s1 and s2 back to the start char const *s2 = &(d_varDB[i].label[0]); //a one-line strcmp. This should keep branching down to a minimum. while (!(strmatch = *(unsigned char *) s1 - *(unsigned char *) s2) && *s1++ && *s2++); //only one thread will ever match this. //And nobody on the device side should ever access "staging" variables. 
if (strmatch == 0) { if (patchID ==-99999999 //Only getLevel calls should hit this (note, && d_varDB[i].matlIndx == matlIndx && d_varDB[i].levelIndx == levelIndx && d_varDB[i].varItem.staging == false /* we don't support staging/foregin vars for get() */ && d_varDB[i].ghostItem.dest_varDB_index == -1) { /*don't let ghost cell copy data mix in with normal variables for get() */ index = i; //we found it. } else if(d_varDB[i].domainID == patchID && d_varDB[i].matlIndx == matlIndx /*&& d_varDB[i].levelIndx == levelIndx*/ //No need for level lookups, label + patchID + matl is a unique tuple. && d_varDB[i].varItem.staging == false && d_varDB[i].ghostItem.dest_varDB_index == -1) { index = i; //we found it. //printf("I'm thread %d In DW at %p, We found it for var %s patch %d matl %d level %d. d_varDB has it at index %d var %s patch %d at its item address %p with var pointer %p\n", // threadID, this, label, patchID, matlIndx, levelIndx, index, &(d_varDB[index].label[0]), d_varDB[index].domainID, &d_varDB[index], d_varDB[index].var_ptr); } } i = i + numThreads; //Since every thread is involved in searching for the string, have this thread loop to the next possible item to check for. } //sync before return; __syncthreads(); if (index == -1) { printf("ERROR:\nGPUDataWarehouse::getItem() didn't find anything for %s patch %d matl %d\n", label, patchID, matlIndx); return nullptr; } return &d_varDB[index]; } //______________________________________________________________________ // __host__ bool GPUDataWarehouse::remove(char const* label, int patchID, int matlIndx, int levelIndx) { /* //This is more of a stub. Remove hasn't been needed up until yet. If removing is needed, it //would likely be best to deallocate things but leave an entry in the collection. 
bool retVal = false; labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); varLock->lock(); if (varPointers->find(lpml) != varPointers->end()) { int i = varPointers->at(lpml).varDB_index; d_varDB[i].label[0] = '\0'; //leave a hole in the flat array, not deleted. varPointers->erase(lpml); //TODO: GPU Memory leak? retVal = true; d_dirty=true; } if (d_debug){ printf("GPUDataWarehouse::remove( %s ). Removed a variable for label %s patch %d matl %d level %d \n", label, label, patchID, matlIndx, levelIndx); } varLock->unlock(); return retVal; */ return false; } //______________________________________________________________________ // __host__ void GPUDataWarehouse::init(int id, std::string internalName) { d_device_id = id; //this->_internalName = new std::string(internalName); strncpy(_internalName, internalName.c_str(), sizeof(_internalName)); objectSizeInBytes = 0; d_maxdVarDBItems = 0; //this->placementNewBuffer = placementNewBuffer; allocateLock = new Uintah::MasterLock{}; varLock = new Uintah::MasterLock{}; varPointers = new std::map<labelPatchMatlLevel, allVarPointersInfo>; contiguousArrays = new std::map<std::string, contiguousArrayInfo>; //other data members are initialized in the constructor d_numVarDBItems = 0; d_numMaterials = 0; d_debug = false; //d_numGhostCells = 0; d_device_copy = nullptr; d_dirty = true; objectSizeInBytes = 0; //resetdVarDB(); numGhostCellCopiesNeeded = 0; } //______________________________________________________________________ // __host__ void GPUDataWarehouse::cleanup() { delete allocateLock; delete varLock; delete varPointers; delete contiguousArrays; } //______________________________________________________________________ // __host__ void GPUDataWarehouse::init_device(size_t objectSizeInBytes, unsigned int d_maxdVarDBItems) { this->objectSizeInBytes = objectSizeInBytes; this->d_maxdVarDBItems = d_maxdVarDBItems; OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id ); void* temp = nullptr; 
//CUDA_RT_SAFE_CALL(cudaMalloc(&temp, objectSizeInBytes)); temp = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, objectSizeInBytes); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::init_device() -" << " requested GPU space from GPUMemoryPool::allocateCudaSpaceFromPool for Task DW of size " << objectSizeInBytes << " bytes at " << temp << " on device " << d_device_id << " the host GPUDW is at " << this << std::endl; } cerrLock.unlock(); } d_device_copy = (GPUDataWarehouse*)temp; //cudaHostRegister(this, sizeof(GPUDataWarehouse), cudaHostRegisterPortable); d_dirty = true; } //______________________________________________________________________ // __host__ void GPUDataWarehouse::syncto_device(void *cuda_stream) { if (!d_device_copy) { printf("ERROR:\nGPUDataWarehouse::syncto_device()\nNo device copy\n"); exit(-1); } varLock->lock(); if (d_dirty){ OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id ); //Even though this is in a writeLock state on the CPU, the nature of multiple threads //each with their own stream copying to a GPU means that one stream might seemingly go out //of order. This is ok for two reasons. 1) Nothing should ever be *removed* from a gpu data warehouse //2) Therefore, it doesn't matter if streams go out of order, each thread will still ensure it copies //exactly what it needs. Other streams may write additional data to the gpu data warehouse, but cpu //threads will only access their own data, not data copied in by other cpu threada via streams. //This approach does NOT require CUDA pinned memory. 
//unsigned int sizeToCopy = sizeof(GPUDataWarehouse); cudaStream_t* stream = (cudaStream_t*)(cuda_stream); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::syncto_device() - cudaMemcpy -" << " sync GPUDW at " << d_device_copy << " with description " << _internalName << " to device " << d_device_id << " on stream " << stream << std::endl; } cerrLock.unlock(); } CUDA_RT_SAFE_CALL (cudaMemcpyAsync( d_device_copy, this, objectSizeInBytes, cudaMemcpyHostToDevice, *stream)); d_dirty=false; } varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::clear() { OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id ); varLock->lock(); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator varIter; for (varIter = varPointers->begin(); varIter != varPointers->end(); ++varIter) { // clear out all the staging vars, if any std::map<stagingVar, stagingVarInfo>::iterator stagingIter; for (stagingIter = varIter->second.var->stagingVars.begin(); stagingIter != varIter->second.var->stagingVars.end(); ++stagingIter) { if (compareAndSwapDeallocating(stagingIter->second.atomicStatusInGpuMemory)) { //The counter hit zero, so lets deallocate the var. 
if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::clear() -" << " calling GPUMemoryPool::freeCudaSpaceFromPool() for staging var for " << varIter->first.label << " at device ptr " << stagingIter->second.device_ptr << " on device " << d_device_id << std::endl; } cerrLock.unlock(); } if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, stagingIter->second.device_ptr) ) { stagingIter->second.device_ptr = nullptr; compareAndSwapDeallocate(stagingIter->second.atomicStatusInGpuMemory); } else { printf("ERROR:\nGPUDataWarehouse::clear(), for a staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", stagingIter->second.device_ptr); varLock->unlock(); exit(-1); } } } varIter->second.var->stagingVars.clear(); // clear out the regular vars // See if it's a placeholder var for staging vars. This happens if the non-staging var // had a device_ptr of nullptr, and it was only in the varPointers map to only hold staging vars if (compareAndSwapDeallocating(varIter->second.var->atomicStatusInGpuMemory)) { if (varIter->second.var->device_ptr) { if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::clear() -" << " calling GPUMemoryPool::freeCudaSpaceFromPool() for non-staging var for " << varIter->first.label << " at device ptr " << varIter->second.var->device_ptr << " on device " << d_device_id << std::endl; } cerrLock.unlock(); } if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, varIter->second.var->device_ptr)) { varIter->second.var->device_ptr = nullptr; compareAndSwapDeallocate(varIter->second.var->atomicStatusInGpuMemory); } else { printf("ERROR:\nGPUDataWarehouse::clear(), for a non-staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", varIter->second.var->device_ptr); varLock->unlock(); exit(-1); } } } } varPointers->clear(); varLock->unlock(); init(d_device_id, _internalName); 
} //______________________________________________________________________ // __host__ void GPUDataWarehouse::deleteSelfOnDevice() { if ( d_device_copy ) { OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id ); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::deleteSelfOnDevice - calling GPUMemoryPool::freeCudaSpaceFromPool for Task DW at " << std::hex << d_device_copy << " on device " << std::dec << d_device_id << std::endl; } cerrLock.unlock(); } GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, d_device_copy); } } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::resetdVarDB() { #ifdef __CUDA_ARCH__ //no meaning in device method #else if (d_device_copy != nullptr) { //TODO: When TaskDWs are removed, this section shouldn't be needed as there won't be concurrency problems //This is designed to help stop tricky race scenarios. One such scenario I encountered was as follows: //Thread A would call getItem() on the GPU, and look thruogh d_varDB for a matching label/patch/matl tuple //Thread B would have previously added a new item to the d_varDB, then called syncto_device. //Thread B would be partway through updating d_varDB on the GPU. It would increase the number of items by one //And it would write the label. But it wouldn't yet write the patch or matl part of the tuple. By coincidence //the old garbage data in the GPU would have exactly the patch and matl that matches thread A's query //For a very brief window, there would be 2 tuples matching that label/patch/matl pair in d_varDB because //thread B hasn't fully written in all of his data. //Thread A's getItem() would run exactly in this brief window, find the wrong match, and use the wrong //memory address, and the program would crash with an invalid address. //The answer is to initialize d_varDB to items that should never provide an accidental match. 
//This should also occur for all other arrays. //TODO: Should this be could be cleaned up to only reset as much as was used. for (int i = 0; i < MAX_VARDB_ITEMS; i++) { d_varDB[i].label[0] = '\0'; d_varDB[i].domainID = -1; d_varDB[i].matlIndx = -1; //d_varDB[i].staging = false; d_varDB[i].var_ptr = nullptr; d_varDB[i].ghostItem.dest_varDB_index = -1; } for (int i = 0; i < MAX_LEVELDB_ITEMS; i++) { d_levelDB[i].label[0] = '\0'; d_levelDB[i].domainID = -1; d_levelDB[i].matlIndx = -1; //d_varDB[i].staging = false; d_levelDB[i].var_ptr = nullptr; } for (int i = 0; i < MAX_MATERIALSDB_ITEMS; i++) { d_materialDB[i].simulationType[0] = '\0'; } } #endif } //______________________________________________________________________ //These material methods below needs more work. They haven't been tested. __host__ void GPUDataWarehouse::putMaterials( std::vector< std::string > materials) { varLock->lock(); //see if a thread has already supplied this datawarehouse with the material data int numMaterials = materials.size(); if (d_numMaterials != numMaterials) { //nobody has given us this material data yet, so lets add it in from the beginning. if (numMaterials > MAX_MATERIALSDB_ITEMS) { printf("ERROR: out of GPUDataWarehouse space for materials"); exit(-1); } for (int i = 0; i < numMaterials; i++) { if (strcmp(materials.at(i).c_str(), "ideal_gas") == 0) { d_materialDB[i].material = IDEAL_GAS; } else { printf("ERROR: This material has not yet been coded for GPU support\n."); exit(-1); } } d_numMaterials = numMaterials; } varLock->unlock(); } //______________________________________________________________________ // HOST_DEVICE int GPUDataWarehouse::getNumMaterials() const { #ifdef __CUDA_ARCH__ return d_numMaterials; #else //I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side. 
return -1; #endif } //______________________________________________________________________ // HOST_DEVICE materialType GPUDataWarehouse::getMaterial(int i) const { #ifdef __CUDA_ARCH__ if (i >= d_numMaterials) { printf("ERROR: Attempting to access material past bounds\n"); assert(0); } return d_materialDB[i].material; #else //I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side. printf("getMaterial() is only implemented as a GPU function"); return IDEAL_GAS; //returning something to prevent a compiler error #endif } //______________________________________________________________________ //TODO: This is too slow. It needs work. __device__ void GPUDataWarehouse::copyGpuGhostCellsToGpuVars() { //Copy all ghost cells from their source to their destination. //The ghost cells could either be only the data that needs to be copied, //or it could be on an edge of a bigger grid var. //I believe the x,y,z coordinates of everything should match. //This could probably be made more efficient by using only perhaps one block, //copying float 4s, and doing it with instruction level parallelism. 
int numThreads = blockDim.x*blockDim.y*blockDim.z; int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block int totalThreads = numThreads * gridDim.x * gridDim.y * gridDim.z; int assignedCellID; //go through every ghost cell var we need for (int i = 0; i < d_numVarDBItems; i++) { //if (threadID == 0) { // if (d_varDB[i].ghostItem.dest_varDB_index != -1) { // printf("d_varDB[%d].label is %s\n", i, d_varDB[d_varDB[i].ghostItem.dest_varDB_index].label, d_numVarDBItems); // } else { // printf("d_varDB[%d].label is %s\n", i, d_varDB[i].label, d_numVarDBItems); // } //} //some things in d_varDB are meta data for simulation variables //other things in d_varDB are meta data for how to copy ghost cells. //Make sure we're only dealing with ghost cells here if(d_varDB[i].ghostItem.dest_varDB_index != -1) { assignedCellID = blockID * numThreads + threadID; int destIndex = d_varDB[i].ghostItem.dest_varDB_index; int3 ghostCellSize; ghostCellSize.x = d_varDB[i].ghostItem.sharedHighCoordinates.x - d_varDB[i].ghostItem.sharedLowCoordinates.x; ghostCellSize.y = d_varDB[i].ghostItem.sharedHighCoordinates.y - d_varDB[i].ghostItem.sharedLowCoordinates.y; ghostCellSize.z = d_varDB[i].ghostItem.sharedHighCoordinates.z - d_varDB[i].ghostItem.sharedLowCoordinates.z; //while there's still work to do (this assigned ID is still within the ghost cell) while (assignedCellID < ghostCellSize.x * ghostCellSize.y * ghostCellSize.z ) { int z = assignedCellID / (ghostCellSize.x * ghostCellSize.y); int temp = assignedCellID % (ghostCellSize.x * ghostCellSize.y); int y = temp / ghostCellSize.x; int x = temp % ghostCellSize.x; assignedCellID += totalThreads; //if we're in a valid x,y,z space for the variable. (It's unlikely every cell will perfectly map onto every available thread.) 
if (x < ghostCellSize.x && y < ghostCellSize.y && z < ghostCellSize.z) { //offset them to their true array coordinates, not relative simulation cell coordinates //When using virtual addresses, the virtual offset is always applied to the source, but the destination is correct. int x_source_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x - d_varDB[i].var_offset.x; int y_source_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y - d_varDB[i].var_offset.y; int z_source_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z - d_varDB[i].var_offset.z; //count over array slots. int sourceOffset = x_source_real + d_varDB[i].var_size.x * (y_source_real + z_source_real * d_varDB[i].var_size.y); int x_dest_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[destIndex].var_offset.x; int y_dest_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[destIndex].var_offset.y; int z_dest_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[destIndex].var_offset.z; int destOffset = x_dest_real + d_varDB[destIndex].var_size.x * (y_dest_real + z_dest_real * d_varDB[destIndex].var_size.y); //if (threadID == 0) { // printf("Going to copy, between (%d, %d, %d) from offset %d to offset %d. From starts at (%d, %d, %d) with size (%d, %d, %d) at index %d pointer %p. 
To starts at (%d, %d, %d) with size (%d, %d, %d).\n", // d_varDB[i].ghostItem.sharedLowCoordinates.x, // d_varDB[i].ghostItem.sharedLowCoordinates.y, // d_varDB[i].ghostItem.sharedLowCoordinates.z, // sourceOffset, // destOffset, // d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z, // d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z, // i, // d_varDB[i].var_ptr, // d_varDB[destIndex].var_offset.x, d_varDB[destIndex].var_offset.y, d_varDB[destIndex].var_offset.z, // d_varDB[destIndex].var_size.x, d_varDB[destIndex].var_size.y, d_varDB[destIndex].var_size.z); //} //copy all 8 bytes of a double in one shot if (d_varDB[i].sizeOfDataType == sizeof(double)) { *((double*)(d_varDB[destIndex].var_ptr) + destOffset) = *((double*)(d_varDB[i].var_ptr) + sourceOffset); //Note: Every now and then I've seen this printf statement get confused, a line will print with the wrong variables/offset variables... // printf("Thread %d - %s At (%d, %d, %d), real: (%d, %d, %d), copying within region between (%d, %d, %d) and (%d, %d, %d). Source d_varDB index (%d, %d, %d) varSize (%d, %d, %d) virtualOffset(%d, %d, %d), varOffset(%d, %d, %d), sourceOffset %d actual pointer %p, value %e. Dest d_varDB index %d ptr %p destOffset %d actual pointer. 
%p\n", // threadID, d_varDB[destIndex].label, x, y, z, x_source_real, y_source_real, z_source_real, // d_varDB[i].ghostItem.sharedLowCoordinates.x, d_varDB[i].ghostItem.sharedLowCoordinates.y, d_varDB[i].ghostItem.sharedLowCoordinates.z, // d_varDB[i].ghostItem.sharedHighCoordinates.x, d_varDB[i].ghostItem.sharedHighCoordinates.y, d_varDB[i].ghostItem.sharedHighCoordinates.z, // x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x, // y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y, // z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z, // d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z, // d_varDB[i].ghostItem.virtualOffset.x, d_varDB[i].ghostItem.virtualOffset.y, d_varDB[i].ghostItem.virtualOffset.z, // d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z, // sourceOffset, (double*)(d_varDB[i].var_ptr) + sourceOffset, *((double*)(d_varDB[i].var_ptr) + sourceOffset), // destIndex, d_varDB[destIndex].var_ptr, destOffset, (double*)(d_varDB[destIndex].var_ptr) + destOffset); } //or copy all 4 bytes of an int in one shot. else if (d_varDB[i].sizeOfDataType == sizeof(int)) { *(((int*)d_varDB[destIndex].var_ptr) + destOffset) = *((int*)(d_varDB[i].var_ptr) + sourceOffset); //Copy each byte until we've copied all for this data type. 
} else { for (int j = 0; j < d_varDB[i].sizeOfDataType; j++) { *(((char*)d_varDB[destIndex].var_ptr) + (destOffset * d_varDB[destIndex].sizeOfDataType + j)) = *(((char*)d_varDB[i].var_ptr) + (sourceOffset * d_varDB[i].sizeOfDataType + j)); } } } } } } } //______________________________________________________________________ // __global__ void copyGpuGhostCellsToGpuVarsKernel( GPUDataWarehouse *gpudw) { gpudw->copyGpuGhostCellsToGpuVars(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker(cudaStream_t* stream) { //see if this GPU datawarehouse has ghost cells in it. if (numGhostCellCopiesNeeded > 0) { //call a kernel which gets the copy process started. OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id); #if 0 // compiler warnings const int BLOCKSIZE = 1; int xblocks = 32; int yblocks = 1; int zblocks = 1; #endif dim3 dimBlock(32, 16, 1); dim3 dimGrid(1, 1, 1); //Give each ghost copying kernel 32 * 16 = 512 threads to copy //(32x32 was too much for a smaller laptop GPU, but was fine for the Titan X on Albion) if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker() - " << " Launching ghost cell copies kernel" << " on device " << d_device_id << " at GPUDW at " << std::hex << this << std::dec << " with description " << _internalName << std::endl; } cerrLock.unlock(); } copyGpuGhostCellsToGpuVarsKernel<<< dimGrid, dimBlock, 0, *stream >>>(this->d_device_copy); } } //______________________________________________________________________ // __host__ bool GPUDataWarehouse::ghostCellCopiesNeeded() { //see if this GPU datawarehouse has ghost cells in it. 
return (numGhostCellCopiesNeeded > 0); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::putGhostCell(char const* label, int sourcePatchID, int destPatchID, int matlIndx, int levelIndx, bool sourceStaging, bool destStaging, int3 varOffset, int3 varSize, int3 sharedLowCoordinates, int3 sharedHighCoordinates, int3 virtualOffset) { //Add information describing a ghost cell that needs to be copied internally from //one chunk of data to the destination. This covers a GPU -> same GPU copy scenario. varLock->lock(); unsigned int i = d_numVarDBItems; if (i > d_maxdVarDBItems) { printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Exceeded maximum d_varDB entries. Index is %d and max items is %d\n", label, i, d_maxdVarDBItems); varLock->unlock(); exit(-1); } int index = -1; d_numVarDBItems++; numGhostCellCopiesNeeded++; d_varDB[i].ghostItem.sharedLowCoordinates = sharedLowCoordinates; d_varDB[i].ghostItem.sharedHighCoordinates = sharedHighCoordinates; d_varDB[i].ghostItem.virtualOffset = virtualOffset; //look up the source index and the destination index for these. //it may be an entire variable (in which case staging is false) //or it may be a staging variable. labelPatchMatlLevel lpml_source(label, sourcePatchID, matlIndx, levelIndx); if (!sourceStaging) { if (varPointers->find(lpml_source) != varPointers->end()) { index = varPointers->at(lpml_source).varDB_index; } } else { //Find the variable that contains the region in which our ghost cells exist. //Usually the sharedLowCoordinates and sharedHighCoordinates correspond //exactly to the size of the staging variable. //(TODO ? But sometimes the ghost data is found within larger staging variable. 
Not sure if there is a use case for this yet) stagingVar sv; sv.device_offset = varOffset; sv.device_size = varSize; std::map<stagingVar, stagingVarInfo>::iterator staging_it = varPointers->at(lpml_source).var->stagingVars.find(sv); if (staging_it != varPointers->at(lpml_source).var->stagingVars.end()) { index = staging_it->second.varDB_index; } else { int nStageVars = varPointers->at(lpml_source).var->stagingVars.size(); printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Number of staging vars for this var: %d, No staging variable found exactly matching all of the following: label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d) on DW at %p.\n", label, nStageVars, label, sourcePatchID, matlIndx, levelIndx, sv.device_offset.x, sv.device_offset.y, sv.device_offset.z, sv.device_size.x, sv.device_size.y, sv.device_size.z, this); varLock->unlock(); exit(-1); } //Find the d_varDB entry for this specific one. } if (index < 0) { printf("ERROR:\nGPUDataWarehouse::putGhostCell, label %s, source patch ID %d, matlIndx %d, levelIndex %d staging %s not found in GPU DW %p\n", label, sourcePatchID, matlIndx, levelIndx, sourceStaging ? 
"true" : "false", this); varLock->unlock(); exit(-1); } d_varDB[i].var_offset = d_varDB[index].var_offset; d_varDB[i].var_size = d_varDB[index].var_size; d_varDB[i].var_ptr = d_varDB[index].var_ptr; d_varDB[i].sizeOfDataType = d_varDB[index].sizeOfDataType; if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::putGhostCell() - " << " Placed into d_varDB at index " << i << " of max index " << d_maxdVarDBItems - 1 << " from patch " << sourcePatchID << " staging " << sourceStaging << " to patch " << destPatchID << " staging " << destStaging << " has shared coordinates (" << sharedLowCoordinates.x << ", " << sharedLowCoordinates.y << ", " << sharedLowCoordinates.z << ")," << " (" << sharedHighCoordinates.x << ", " << sharedHighCoordinates.y << ", " << sharedHighCoordinates.z << "), " << " from low/offset (" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << ") " << " size (" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << ") " << " virtualOffset (" << d_varDB[i].ghostItem.virtualOffset.x << ", " << d_varDB[i].ghostItem.virtualOffset.y << ", " << d_varDB[i].ghostItem.virtualOffset.z << ") " << " datatype size " << d_varDB[i].sizeOfDataType << " on device " << d_device_id << " at GPUDW at " << std::hex << this<< std::dec << std::endl; } cerrLock.unlock(); } // Find where we are sending the ghost cell data to labelPatchMatlLevel lpml_dest(label, destPatchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml_dest); if (it != varPointers->end()) { if (destStaging) { //TODO: Do the same thing as the source. //If the destination is staging, then the shared coordinates are also the ghost coordinates. 
stagingVar sv; sv.device_offset = sharedLowCoordinates; sv.device_size = make_int3(sharedHighCoordinates.x-sharedLowCoordinates.x, sharedHighCoordinates.y-sharedLowCoordinates.y, sharedHighCoordinates.z-sharedLowCoordinates.z); std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv); if (staging_it != it->second.var->stagingVars.end()) { d_varDB[i].ghostItem.dest_varDB_index = staging_it->second.varDB_index; } else { printf("\nERROR:\nGPUDataWarehouse::putGhostCell() didn't find a staging variable from the device for offset (%d, %d, %d) and size (%d, %d, %d).\n", sharedLowCoordinates.x, sharedLowCoordinates.y, sharedLowCoordinates.z, sv.device_size.x, sv.device_size.y, sv.device_size.z); varLock->unlock(); exit(-1); } } else { d_varDB[i].ghostItem.dest_varDB_index = it->second.varDB_index; } } else { printf("ERROR:\nGPUDataWarehouse::putGhostCell(), label: %s destination patch ID %d, matlIndx %d, levelIndex %d, staging %s not found in GPU DW variable database\n", label, destPatchID, matlIndx, levelIndx, destStaging ? 
"true" : "false"); varLock->unlock(); exit(-1); } d_dirty=true; varLock->unlock(); } //______________________________________________________________________ // __host__ void GPUDataWarehouse::getSizes(int3& low, int3& high, int3& siz, GhostType& gtype, int& numGhostCells, char const* label, int patchID, int matlIndx, int levelIndx) { varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { allVarPointersInfo info = varPointers->at(lpml); low = info.device_offset; high.x = info.var->device_size.x + info.var->device_offset.x; high.y = info.var->device_size.y + info.var->device_offset.y; high.z = info.var->device_size.z + info.var->device_offset.z; siz = info.var->device_size; gtype = info.var->gtype; numGhostCells = info.var->numGhostCells; } varLock->unlock(); } //______________________________________________________________________ //Deep copies (not shallow copies or moves) an entry from one data warehouse to another. //(Note: A deep copy is a full copy of data from one variable's memory space to another variable's memory space //A shallow copy is just a pointer copy and a ref counting //A move is a true std::move() reseating.) //RMCRT and Arches often keep a variable in the old data warehouse alive by copying it to the new data warehouse. //It can't be a move (it may be needed to use data from the old and the new) //It can't be a shallow copy (it may be needed to modify the new and also use the old) //So it must be a deep copy. //Both the source and destination variables must be in the GPU data warehouse, //both must be listed as "allocated". If these are not the case, the transferFrom doesn't proceed. //Both must have the same variable sizes. If this is not the case, the program will exit. //If all above conditions are met, then it will do a device to device memcopy call. //*Important*: For this to work, it needs a GPU stream. 
// GPU streams are stored per task; every Uintah task is assigned
// a possible stream to use.  To get the stream you have to request it from the
// detailedTask object.  Normal CPU task callback functions do not have access
// to the detailedTask object, but it is possible to extend the callback
// function parameter list so that it does.  See
// UnifiedSchedulerTest::timeAdvanceUnified as an example.
//*Also important*: For this to work, the destination variable *MUST* be listed
// as a computes in the task that's calling transferFrom().  That allows for the
// computes data to have been preallocated ahead of time by the scheduler.
// Uintah's scheduler is fine if it is able to allocate the space, so that it
// can allow the task developer to write data into space it created.  If it was
// a computes, then this method can copy data into the computes memory, and
// when the task which called transferFrom is done, the scheduler will mark
// this computes variable as VALID.
// Note: A shallow copy method has been requested by the Arches team.  That
// hasn't been implemented yet.  It would require ref counting a variable, and
// perhaps some sanity checks to ensure a shallow copied variable is not called
// a computes and then later listed as a modifies.
__host__ bool
GPUDataWarehouse::transferFrom(cudaStream_t* stream, GPUGridVariableBase &var_source, GPUGridVariableBase &var_dest,
                               GPUDataWarehouse * from, char const* label, int patchID, int matlIndx, int levelIndx)
{
  // Lock both data warehouses.  There is no lock-free way to do this section;
  // taking both locks up front avoids the dining philosophers problem.
  from->varLock->lock();
  this->varLock->lock();

  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator source_it = from->varPointers->find(lpml);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator dest_it   = this->varPointers->find(lpml);

  bool proceed = true;  // was declared as int; this is a boolean flag
  if (source_it == from->varPointers->end()) {
    // It may just be there wasn't any requires in the GPU to begin with, so don't bother attempting to copy.
    //printf("GPU source not found\n");
    proceed = false;
  } else if (dest_it == this->varPointers->end()) {
    // It may just be there wasn't any computes in the GPU to begin with, so don't bother attempting to copy.
    //printf("GPU dest not found in DW at %p for variable %s patch %d matl %d level %d\n", this, label, patchID, matlIndx, levelIndx);
    proceed = false;
  } else if (((__sync_fetch_and_or(&(source_it->second.var->atomicStatusInGpuMemory), 0) & ALLOCATED) != ALLOCATED)) {
    // The source entry exists but no device space is allocated for it, so there is nothing to copy from.
    //printf("GPU source not allocated for variable %s patch %d matl %d level %d, it has status codes %s\n", label, patchID, matlIndx, levelIndx, getDisplayableStatusCodes(source_it->second.atomicStatusInGpuMemory).c_str());
    proceed = false;
  } else if (((__sync_fetch_and_or(&(dest_it->second.var->atomicStatusInGpuMemory), 0) & ALLOCATED) != ALLOCATED)) {
    // The destination entry exists but no device space is allocated for it, so there is nowhere to copy to.
    //printf("GPU destination not allocated for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx);
    proceed = false;
  }

  if (!proceed) {
    from->varLock->unlock();
    this->varLock->unlock();
    return false;
  }

  // Both entries exist and are allocated; the sizes must match exactly for a
  // straight device-to-device memcpy to be valid.
  if (!(   source_it->second.var->device_offset.x == dest_it->second.var->device_offset.x
        && source_it->second.var->device_offset.y == dest_it->second.var->device_offset.y
        && source_it->second.var->device_offset.z == dest_it->second.var->device_offset.z
        && source_it->second.var->device_size.x   == dest_it->second.var->device_size.x
        && source_it->second.var->device_size.y   == dest_it->second.var->device_size.y
        && source_it->second.var->device_size.z   == dest_it->second.var->device_size.z )) {
    printf("Error: GPUDataWarehouse::transferFrom() - The source and destination variables exists for variable %s patch %d matl %d level %d, but the sizes don't match.  Cannot proceed with deep copy.  Exiting...\n", label, patchID, matlIndx, levelIndx);
    printf("The source size is (%d, %d, %d) with offset (%d, %d, %d) and device size is (%d, %d, %d) with offset (%d, %d, %d)\n",
           source_it->second.var->device_size.x, source_it->second.var->device_size.y, source_it->second.var->device_size.z,
           source_it->second.var->device_offset.x, source_it->second.var->device_offset.y, source_it->second.var->device_offset.z,
           dest_it->second.var->device_size.x, dest_it->second.var->device_size.y, dest_it->second.var->device_size.z,
           dest_it->second.var->device_offset.x, dest_it->second.var->device_offset.y, dest_it->second.var->device_offset.z);
    from->varLock->unlock();
    this->varLock->unlock();
    exit(-1);
  } else if (!(source_it->second.var->device_ptr)) {
    // A couple more sanity checks, this may be overkill...
    printf("Error: GPUDataWarehouse::transferFrom() - No source variable pointer found for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx);
    from->varLock->unlock();
    this->varLock->unlock();
    exit(-1);
  } else if (!(dest_it->second.var->device_ptr)) {
    printf("Error: GPUDataWarehouse::transferFrom() - No destination variable pointer found for variable %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx);
    from->varLock->unlock();
    this->varLock->unlock();
    exit(-1);
  } else if (!stream) {
    printf("ERROR: No stream associated with the detailed task.  Cannot proceed with deep copy.  Exiting...\n");
    printf("If you get this message, the fix is not that rough.  You need to change your CPU callback function to having the full set of parameters common for a GPU task.  If you do that, the engine should pick up the rest of the details.\n");
    from->varLock->unlock();
    this->varLock->unlock();
    exit(-1);
  }

  // We shouldn't need to allocate space on either the source or the destination.
  // The source should have been listed as a requires, and the destination should
  // have been listed as a computes for the task.  And this solves a mess of
  // problems, mainly dealing with when it is listed as allocated and when it's
  // listed as valid.
  var_source.setArray3(source_it->second.var->device_offset, source_it->second.var->device_size, source_it->second.var->device_ptr);
  // BUG FIX: this previously called var_source.setArray3(...) a second time,
  // leaving the caller's var_dest untouched.
  var_dest.setArray3(dest_it->second.var->device_offset, dest_it->second.var->device_size, dest_it->second.var->device_ptr);

  // Asynchronous device-to-device copy on the task's stream.
  cudaMemcpyAsync(dest_it->second.var->device_ptr,
                  source_it->second.var->device_ptr,
                  source_it->second.var->device_size.x * source_it->second.var->device_size.y * source_it->second.var->device_size.z * source_it->second.var->sizeOfDataType,
                  cudaMemcpyDeviceToDevice,
                  *stream);

  from->varLock->unlock();
  this->varLock->unlock();

  // Let the caller know we found and transferred something.
  return true;
}

//______________________________________________________________________
// Go through all staging vars for a var.  See if they are all marked as valid.
__host__ bool
GPUDataWarehouse::areAllStagingVarsValid(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    for (std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.begin();
         staging_it != it->second.var->stagingVars.end();
         ++staging_it) {
      if (!checkValid(staging_it->second.atomicStatusInGpuMemory)) {
        if (gpu_stats.active()) {
          cerrLock.lock();
          {
            gpu_stats << UnifiedScheduler::myRankThread()
                << " GPUDataWarehouse::areAllStagingVarsValid() -"
                // Task: " << dtask->getName()
                << " Not all staging vars were ready for "
                << label << " patch " << patchID
                << " material " << matlIndx << " level " << levelIndx
                << " offset (" << staging_it->first.device_offset.x
                << ", " << staging_it->first.device_offset.y
                << ", " << staging_it->first.device_offset.z
                << ") and size (" << staging_it->first.device_size.x
                << ", " << staging_it->first.device_size.y
                << ", " << staging_it->first.device_size.z
                << ") with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
                << std::endl;
          }
          cerrLock.unlock();
        }
        // BUG FIX: unlock *after* the logging above.  Previously varLock was
        // released before staging_it was dereferenced for the log message,
        // allowing another thread to mutate the map under the iterator.
        varLock->unlock();
        return false;
      }
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// Simply performs an atomic fetch on the status variable.
//typedef int atomicDataStatus;
//__host__ atomicDataStatus
//GPUDataWarehouse::getStatus(atomicDataStatus& status) {
//  return __sync_or_and_fetch(&(status), 0);
//}

//______________________________________________________________________
// Builds a human-readable, space-separated list of the status flags currently
// set in `status`.  The read is performed atomically; the returned string has
// no trailing whitespace.
__host__ std::string
GPUDataWarehouse::getDisplayableStatusCodes(atomicDataStatus& status)
{
  atomicDataStatus varStatus = __sync_or_and_fetch(&status, 0);
  std::string retval = "";
  if (varStatus == 0) {
    retval += "Unallocated ";
  } else {
    if ((varStatus & ALLOCATING) == ALLOCATING)                   { retval += "Allocating "; }
    if ((varStatus & ALLOCATED) == ALLOCATED)                     { retval += "Allocated "; }
    if ((varStatus & COPYING_IN) == COPYING_IN)                   { retval += "Copying-in "; }
    if ((varStatus & VALID) == VALID)                             { retval += "Valid "; }
    if ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) { retval += "Awaiting-ghost-copy "; }
    if ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)     { retval += "Valid-with-ghosts "; }
    if ((varStatus & DEALLOCATING) == DEALLOCATING)               { retval += "Deallocating "; }
    if ((varStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH)   { retval += "Forming-superpatch "; }
    if ((varStatus & SUPERPATCH) == SUPERPATCH)                   { retval += "Superpatch "; }
    if ((varStatus & UNKNOWN) == UNKNOWN)                         { retval += "Unknown "; }
  }
  // Trim trailing whitespace.
  // FIX: std::not1/std::ptr_fun are deprecated (removed in C++17), and passing
  // a plain char to std::isspace is undefined for negative values; use a
  // lambda over unsigned char instead.
  retval.erase(std::find_if(retval.rbegin(), retval.rend(),
                            [](unsigned char ch) { return !std::isspace(ch); }).base(),
               retval.end());
  return retval;
}

//______________________________________________________________________
// Reports, via the bool out-parameters, the allocation/validity flags of the
// variable identified by (label, patchID, matlIndx, levelIndx), plus whether
// its device offset/size match the supplied ones.  If the variable is unknown
// to this data warehouse, every out-parameter is set to false.
__host__ void
GPUDataWarehouse::getStatusFlagsForVariableOnGPU(bool& correctSize, bool& allocating, bool& allocated, bool& copyingIn,
                                                 bool& validOnGPU, bool& gatheringGhostCells, bool& validWithGhostCellsOnGPU,
                                                 bool& deallocating, bool& formingSuperPatch, bool& superPatch,
                                                 char const* label, const int patchID, const int matlIndx, const int levelIndx,
                                                 const int3& offset, const int3& size)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    // Check the sizes.
    allVarPointersInfo vp = varPointers->at(lpml);
    int3 device_offset = vp.var->device_offset;
    int3 device_size   = vp.var->device_size;
    correctSize = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
                   && device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);

    // Get the value atomically, then decode each flag.
    atomicDataStatus varStatus = __sync_or_and_fetch(&(vp.var->atomicStatusInGpuMemory), 0);

    allocating               = ((varStatus & ALLOCATING) == ALLOCATING);
    allocated                = ((varStatus & ALLOCATED) == ALLOCATED);
    copyingIn                = ((varStatus & COPYING_IN) == COPYING_IN);
    validOnGPU               = ((varStatus & VALID) == VALID);
    gatheringGhostCells      = ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY);
    validWithGhostCellsOnGPU = ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS);
    deallocating             = ((varStatus & DEALLOCATING) == DEALLOCATING);
    formingSuperPatch        = ((varStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH);
    superPatch               = ((varStatus & SUPERPATCH) == SUPERPATCH);
  } else {
    correctSize = false;
    allocating = false;
    allocated = false;
    copyingIn = false;
    validOnGPU = false;
    gatheringGhostCells = false;
    validWithGhostCellsOnGPU = false;
    deallocating = false;  // BUG FIX: was left unassigned on this branch, returning garbage to the caller
    formingSuperPatch = false;
    superPatch = false;
  }
  varLock->unlock();
}

//______________________________________________________________________
// returns false if something else already allocated space and we don't have to.
// returns true if we are the ones to allocate the space.
// performs operations with atomic compare and swaps
__host__ bool
GPUDataWarehouse::compareAndSwapAllocating(atomicDataStatus& status)
{
  bool allocating = false;
  while (!allocating) {
    // Get the value.
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
    // The upper 16 bits of the status word hold the reference counter.
    unsigned int refCounter = (oldVarStatus >> 16);

    if (refCounter >= 1) {
      // Something else already took care of it, and it has moved beyond the
      // allocating state into something else; this thread must not allocate.
      return false;
    } else if ((oldVarStatus & UNALLOCATED) != UNALLOCATED) {
      // Sanity check.  The ref counter was zero, but the variable isn't unallocated.  We can't have this.
      // NOTE(review): UNALLOCATED appears to be all-zero bits, which would make
      // this condition unreachable — confirm the intended check.
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocating( ) Something wrongly modified the atomic status while setting the allocated flag\n");
      exit(-1);
    } else {
      // Attempt to claim we'll allocate it.  If the CAS fails, go back into our loop and recheck.
      short refCounter = 1;
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);  // Place in the reference counter and save the right 16 bits.
      newVarStatus = newVarStatus | ALLOCATING;  // It's possible to preserve a flag, such as copying in ghost cells.
      allocating = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// Sets the allocated flag on a variable's atomicDataStatus.
// This is called after an allocating process completes.  *Only* the thread that got a true from
// compareAndSwapAllocating() should immediately call this.
__host__ bool
GPUDataWarehouse::compareAndSwapAllocate(atomicDataStatus& status)
{
  bool allocated = false;

  // Get the value.
  atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
  if ((oldVarStatus & ALLOCATING) == 0) {
    // A sanity check.
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Can't allocate a status if it wasn't previously marked as allocating.\n");
    exit(-1);
  } else if ((oldVarStatus & ALLOCATED) == ALLOCATED) {
    // A sanity check.
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Can't allocate a status if it's already allocated\n");
    exit(-1);
  } else {
    // Attempt to claim we'll allocate it.  Create what we want the status to look like
    // by turning off allocating and turning on allocated.
    // Note: No need to turn off UNALLOCATED, it's defined as all zero bits.
    // But the below is kept in just for readability's sake.
    atomicDataStatus newVarStatus = oldVarStatus & ~UNALLOCATED;
    newVarStatus = newVarStatus & ~ALLOCATING;
    newVarStatus = newVarStatus | ALLOCATED;

    // If we succeeded in our attempt to claim to allocate, this returns true.
    // If we failed, that's a real problem, and we crash the program below.
    allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
  }
  if (!allocated) {
    // Another sanity check.
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAllocate( ) Something wrongly modified the atomic status while setting the allocated flag\n");
    exit(-1);
  }
  return allocated;
}

//______________________________________________________________________
// Simply determines if a variable has been marked as allocated.
__host__ bool
GPUDataWarehouse::checkAllocated(atomicDataStatus& status)
{
  return ((__sync_or_and_fetch(&status, 0) & ALLOCATED) == ALLOCATED);
}

//______________________________________________________________________
// Decrements the reference counter; when this thread takes the count from 1,
// it claims the deallocation by setting DEALLOCATING and returns true.
// Returns false when there is nothing to deallocate or another thread is
// (or has been) responsible for it.
__host__ bool
GPUDataWarehouse::compareAndSwapDeallocating(atomicDataStatus& status)
{
  bool deallocating = false;
  while (!deallocating) {
    // Get the value.
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0);
    unsigned int refCounter = (oldVarStatus >> 16);
    if (refCounter == 0
        || ((oldVarStatus & DEALLOCATING) == DEALLOCATING)
        || ((oldVarStatus & 0xFFFF) == UNALLOCATED)
        || ((oldVarStatus & UNKNOWN) == UNKNOWN)) {
      // There's nothing to deallocate, or something else already deallocated it or is deallocating it.
      // So this thread won't do it.
      return false;
    } else if (refCounter == 1) {
      // Ref counter is 1, we can deallocate it.
      // Leave the refCounter at 1.
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);  // Place in the reference counter and save the right 16 bits.
      newVarStatus = newVarStatus | DEALLOCATING;  // Set it to deallocating so nobody else can attempt to use it.
      bool successfulUpdate = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
      if (successfulUpdate) {
        // Need to deallocate, let the caller know it.
        deallocating = true;
      }
    } else if (refCounter > 1) {
      // Something else is using this variable, don't deallocate, just decrement the counter.
      refCounter--;
      atomicDataStatus newVarStatus = (refCounter << 16) | (oldVarStatus & 0xFFFF);
      bool successfulUpdate = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
      if (successfulUpdate) {
        // No need to deallocate, let the caller know it.
        return false;
      }
    } else {
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocating( ) This variable's ref counter was 0, but its status said it was in use. This shouldn't happen\n");
      exit(-1);
    }
  }
  return true;
}

//______________________________________________________________________
// Resets a variable's atomicDataStatus to unallocated.
// This is called after a deallocating process completes.  *Only* the thread that got a true from
// compareAndSwapDeallocating() should immediately call this.
__host__ bool GPUDataWarehouse::compareAndSwapDeallocate(atomicDataStatus& status) { bool allocated = false; //get the value atomicDataStatus oldVarStatus = __sync_or_and_fetch(&status, 0); unsigned int refCounter = (oldVarStatus >> 16); if ((oldVarStatus & DEALLOCATING) == 0) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Can't deallocate a status if it wasn't previously marked as deallocating.\n"); exit(-1); } else if ((oldVarStatus & 0xFFFF) == UNALLOCATED) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Can't deallocate a status if it's already deallocated\n"); exit(-1); } else if (refCounter != 1) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Attemping to deallocate a variable but the ref counter isn't the required value of 1\n"); exit(-1); } else { //Attempt to claim we'll deallocate it. Create what we want the status to look like //by turning off all status flags (indicating unallocated), it should also zero out the reference counter. atomicDataStatus newVarStatus = UNALLOCATED; //If we succeeded in our attempt to claim to deallocate, this returns true. //If we failed, thats a real problem, and we crash the problem below. allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus); } if (!allocated) { //Another sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapDeallocate( ) Something wrongly modified the atomic status while trying set the status flags to unallocated\n"); exit(-1); } return allocated; } //______________________________________________________________________ // Simply determines if a variable has been marked as valid. 
// Atomically reads the status word and reports whether VALID is set.
__host__ bool
GPUDataWarehouse::checkValid(atomicDataStatus& status)
{
  return ((__sync_or_and_fetch(&status, 0) & VALID) == VALID);
}

//______________________________________________________________________
// Returns true if the variable is known to this DW and its GPU status has the
// ALLOCATED flag set; false otherwise.
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    // Atomic fetch of the status word; check only the ALLOCATED bit.
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Overload that additionally requires the variable's device offset and size to
// match the supplied ones exactly.
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    //cout << "In isAllocatedOnGPU - For patchID " << patchID << " for the status is " << getDisplayableStatusCodes(varPointers->at(lpml).atomicStatusInGpuMemory) << endl;
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
    if (retVal) {
      // Now check the sizes: allocation only counts if it matches the requested region.
      int3 device_offset = varPointers->at(lpml).var->device_offset;
      int3 device_size = varPointers->at(lpml).var->device_size;
      retVal = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
                && device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);
    }
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Returns true if the variable is known to this DW and its GPU status has the
// VALID flag set; false otherwise.
__host__ bool
GPUDataWarehouse::isValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & VALID) == VALID);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Atomically sets VALID (and clears COPYING_IN) on the variable's GPU status.
// Returns true if this thread performed the transition, false if the variable
// was already valid.  Exits the program if the variable is unknown.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnGPU(char const* const label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  bool settingValid = false;
  while (!settingValid) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      atomicDataStatus *status = &(it->second.var->atomicStatusInGpuMemory);
      atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
      if ((oldVarStatus & VALID) == VALID) {
        // Something else already took care of it.  So this task won't manage it.
        varLock->unlock();
        return false;
      } else {
        // Attempt to claim the VALID transition for this variable (COPYING_IN is
        // cleared at the same time).  If the claim fails, go back into our loop and recheck.
        atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
        newVarStatus = newVarStatus | VALID;
        settingValid = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
      }
    } else {
      varLock->unlock();
      printf("ERROR\nGPUDataWarehouse::compareAndSwapSetValidOnGPU() - Unknown variable %s on GPUDataWarehouse\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// Same as compareAndSwapSetValidOnGPU, but for a staging entry identified by
// its (offset, size) within the variable.  Exits the program if either the
// variable or the staging entry is unknown.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
  varLock->lock();
  bool settingValidOnStaging = false;
  while (!settingValidOnStaging) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      // Staging entries are keyed by their device offset and size.
      stagingVar sv;
      sv.device_offset = offset;
      sv.device_size = size;
      std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv);
      if (staging_it != it->second.var->stagingVars.end()) {
        atomicDataStatus *status = &(staging_it->second.atomicStatusInGpuMemory);
        atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
        if ((oldVarStatus & VALID) == VALID) {
          // Something else already took care of it.  So this task won't manage it.
          varLock->unlock();
          return false;
        } else {
          // Attempt to claim the VALID transition for this staging entry.
          // If the claim fails, go back into our loop and recheck.
          atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
          newVarStatus = newVarStatus | VALID;
          settingValidOnStaging = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
        }
      } else {
        varLock->unlock();
        printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetValidOnGPUStaging( ) Staging variable %s not found.\n", label);
        exit(-1);
      }
    } else {
      varLock->unlock();
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetValidOnGPUStaging( ) Variable %s not found.\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// We have an entry for this item in the GPU DW, and it's not unknown.  Therefore
// if this returns true it means this GPU DW specifically knows something about the
// state of this variable.  (The reason for the unknown check is currently when a
// var is added to the GPUDW, we also need to state what we know about its data in
// host memory.  Since it doesn't know, it marks it as unknown, meaning, the host
// side DW is possibly managing the data.)
__host__ bool
GPUDataWarehouse::dwEntryExistsOnCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  varLock->lock();
  bool retVal = false;
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    // FIX: read the status word atomically, consistent with every other reader
    // of atomicStatusInHostMemory (previously this was a plain, unfenced read).
    if ((__sync_fetch_and_or(&(it->second.var->atomicStatusInHostMemory), 0) & UNKNOWN) != UNKNOWN) {
      retVal = true;
    }
  }
  varLock->unlock();
  return retVal;
}

//______________________________________________________________________
// Returns true if the variable is known to this DW and its host-memory status
// has the VALID flag set; false otherwise.
__host__ bool
GPUDataWarehouse::isValidOnCPU(char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) != varPointers->end()) {
    bool retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInHostMemory), 0) & VALID) == VALID);
    varLock->unlock();
    return retVal;
  } else {
    varLock->unlock();
    return false;
  }
}

//______________________________________________________________________
// Atomically sets VALID (and clears COPYING_IN) on the variable's host-memory
// status.  Returns true if this thread performed the transition, false if the
// variable was already valid.  Exits the program if the variable is unknown.
__host__ bool
GPUDataWarehouse::compareAndSwapSetValidOnCPU(char const* const label, const int patchID, const int matlIndx, const int levelIndx)
{
  varLock->lock();
  bool settingValid = false;
  while (!settingValid) {
    labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
    std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
    if (it != varPointers->end()) {
      atomicDataStatus *status = &(it->second.var->atomicStatusInHostMemory);
      atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
      if ((oldVarStatus & VALID) == VALID) {
        // Something else already took care of it.  So this task won't manage it.
        varLock->unlock();
        return false;
      } else {
        // Attempt to claim the VALID transition.  If the claim fails, go back
        // into our loop and recheck.
        atomicDataStatus newVarStatus = oldVarStatus & ~COPYING_IN;
        newVarStatus = newVarStatus | VALID;
        settingValid = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
      }
    } else {
      varLock->unlock();
      printf("ERROR\nGPUDataWarehouse::compareAndSwapSetValidOnCPU() - Unknown variable %s on GPUDataWarehouse\n", label);
      exit(-1);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// returns false if something else already changed a valid variable to valid awaiting ghost data
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  bool allocating = false;

  varLock->lock();
  // The map lookup is loop-invariant (varLock is held for the whole loop, so
  // the map cannot change underneath us); do it once up front.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  if (varPointers->find(lpml) == varPointers->end()) {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapAwaitingGhostDataOnGPU( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  atomicDataStatus *status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory);

  while (!allocating) {
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (((oldVarStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
      // Something else already took care of it.  So this task won't manage it.
      varLock->unlock();
      return false;
    } else {
      // Attempt to claim we'll manage the ghost cells for this variable.
      // If the claim fails, go back into our loop and recheck.
      atomicDataStatus newVarStatus = oldVarStatus | AWAITING_GHOST_COPY;
      allocating = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  varLock->unlock();
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the GPU.
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapCopyingIntoGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  atomicDataStatus* status = nullptr;

  // Get the status pointer while holding the lock; the CAS loop below works on
  // the atomic word alone and does not need the lock.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  varLock->lock();
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  if (it != varPointers->end()) {
    status = &(it->second.var->atomicStatusInGpuMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPU( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  varLock->unlock();

  bool copyingin = false;
  while (!copyingin) {
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (oldVarStatus == UNALLOCATED) {
      printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPU( ) Variable %s is unallocated.\n", label);
      exit(-1);
    }
    if (((oldVarStatus & COPYING_IN) == COPYING_IN)
        || ((oldVarStatus & VALID) == VALID)
        || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
      // Something else already took care of it.  So this task won't manage it.
      return false;
    } else {
      // Attempt to claim the copy-in for this variable.  If the claim fails,
      // go back into our loop and recheck.
      atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
      copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the CPU.
// returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::compareAndSwapCopyingIntoCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
  atomicDataStatus* status = nullptr;

  // Get the status pointer while holding the lock.
  labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
  varLock->lock();
  std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
  // FIX: reuse the iterator from the find above (previously the map was
  // searched a second time for the same key), matching compareAndSwapCopyingIntoGPU.
  if (it != varPointers->end()) {
    status = &(it->second.var->atomicStatusInHostMemory);
  } else {
    varLock->unlock();
    printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoCPU( ) Variable %s not found.\n", label);
    exit(-1);
    return false;
  }
  varLock->unlock();

  bool copyingin = false;
  while (!copyingin) {
    // Get the current value of the atomic word.
    atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
    if (((oldVarStatus & COPYING_IN) == COPYING_IN)
        || ((oldVarStatus & VALID) == VALID)
        || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
      // Something else already took care of it.  So this task won't manage it.
      return false;
    } else {
      // Attempt to claim the copy-in; also clear UNKNOWN since we now know
      // something about the host-side data.  If the claim fails, go back into
      // our loop and recheck.
      atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
      newVarStatus = newVarStatus & ~UNKNOWN;
      copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
    }
  }
  return true;
}

//______________________________________________________________________
// returns false if something else already claimed to copy or has copied data into the GPU.
// returns true if we are the ones to manage this variable's ghost data. __host__ bool GPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size) { atomicDataStatus* status; // get the status labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); varLock->lock(); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { stagingVar sv; sv.device_offset = offset; sv.device_size = size; std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.var->stagingVars.find(sv); if (staging_it != it->second.var->stagingVars.end()) { status = &(staging_it->second.atomicStatusInGpuMemory); } else { varLock->unlock(); printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Staging variable %s not found.\n", label); exit(-1); return false; } } else { varLock->unlock(); printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s not found.\n", label); exit(-1); return false; } varLock->unlock(); bool copyingin = false; while (!copyingin) { //get the address atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0); if (oldVarStatus == UNALLOCATED) { printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s is unallocated.\n", label); exit(-1); } else if ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) { printf("ERROR:\nGPUDataWarehouse::compareAndSwapCopyingIntoGPUStaging( ) Variable %s is marked as valid with ghosts, that should never happen with staging vars.\n", label); exit(-1); } else if (((oldVarStatus & COPYING_IN) == COPYING_IN) || ((oldVarStatus & VALID) == VALID)) { //Something else already took care of it. So this task won't manage it. return false; } else { //Attempt to claim we'll manage the ghost cells for this variable. 
If the claim fails go back into our loop and recheck atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN; copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus); } } return true; } //______________________________________________________________________ // __host__ bool GPUDataWarehouse::isValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx) { varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { bool retVal = ((__sync_fetch_and_or(&(it->second.var->atomicStatusInGpuMemory), 0) & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS); varLock->unlock(); return retVal; } else { varLock->unlock(); return false; } } //______________________________________________________________________ //TODO: This needs to be turned into a compare and swap operation __host__ void GPUDataWarehouse::setValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx) { varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if (it != varPointers->end()) { //UNKNOWN //make sure the valid is still turned on __sync_or_and_fetch(&(it->second.var->atomicStatusInGpuMemory), VALID); //turn off AWAITING_GHOST_COPY __sync_and_and_fetch(&(it->second.var->atomicStatusInGpuMemory), ~AWAITING_GHOST_COPY); //turn on VALID_WITH_GHOSTS __sync_or_and_fetch(&(it->second.var->atomicStatusInGpuMemory), VALID_WITH_GHOSTS); varLock->unlock(); } else { varLock->unlock(); exit(-1); } } //______________________________________________________________________ // returns true if successful if marking a variable as a superpatch. False otherwise. // Can only turn an unallocated variable into a superpatch. 
__host__ bool GPUDataWarehouse::compareAndSwapFormASuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx) { bool compareAndSwapSucceeded = false; //get the status atomicDataStatus* status = nullptr; varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory); } else { varLock->unlock(); printf("ERROR:\nGPUDataWarehouse::compareAndSwapFormASuperPatchGPU( ) Variable %s patch %d material %d levelIndx %d not found.\n", label, patchID, matlIndx, levelIndx); exit(-1); return false; } varLock->unlock(); while (!compareAndSwapSucceeded) { atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::compareAndSwapFormASuperPatchGPU() - " << " Attempting to set a superpatch flag for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " with status codes " << getDisplayableStatusCodes(oldVarStatus) << std::endl; } cerrLock.unlock(); } if ( (oldVarStatus & FORMING_SUPERPATCH) == FORMING_SUPERPATCH || ((oldVarStatus & SUPERPATCH) == SUPERPATCH)) { //Something else already took care of it. So this task won't manage it. return false; } else if (((oldVarStatus & ALLOCATING) == ALLOCATING) || ((oldVarStatus & ALLOCATED) == ALLOCATED) || ((oldVarStatus & ALLOCATING) == ALLOCATING) || ((oldVarStatus & COPYING_IN) == COPYING_IN) || ((oldVarStatus & VALID) == VALID) || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) || ((oldVarStatus & DEALLOCATING) == DEALLOCATING)) { //Note, we DO allow a variable to be set as AWAITING_GHOST_COPY before anything else. //At the time of implementation this scenario shouldn't ever happen. If so it means //Someone is requesting to take a variable already in memory that's not a superpatch //and turn it into a superpatch. 
It would require some kind of special deep copy mechanism printf("ERROR:\nGPUDataWarehouse::compareAndSwapFormASuperPatchGPU( ) Variable %s cannot be turned into a superpatch, it's in use already with status %s.\n", label, getDisplayableStatusCodes(oldVarStatus).c_str()); exit(-1); return false; } else { atomicDataStatus newVarStatus = oldVarStatus | FORMING_SUPERPATCH; compareAndSwapSucceeded = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus); } } atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0); if (gpu_stats.active()) { cerrLock.lock(); { gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::compareAndSwapFormASuperPatchGPU() - " << " Success for label " << label << " patch " << patchID << " matl " << matlIndx << " level " << levelIndx << " with status codes " << getDisplayableStatusCodes(oldVarStatus) << std::endl; } cerrLock.unlock(); } return true; } //______________________________________________________________________ // Sets the allocated flag on a variables atomicDataStatus // This is called after a forming a superpatch process completes. *Only* the thread that got to set FORMING_SUPERPATCH can // set SUPERPATCH. Further, no other thread should modify the atomic status //compareAndSwapFormASuperPatchGPU() should immediately call this. 
__host__ bool GPUDataWarehouse::compareAndSwapSetSuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx) { bool superpatched = false; //get the status atomicDataStatus* status = nullptr; varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { status = &(varPointers->at(lpml).var->atomicStatusInGpuMemory); } else { varLock->unlock(); printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Variable %s patch %d material %d levelIndx %d not found.\n", label, patchID, matlIndx, levelIndx); exit(-1); return false; } varLock->unlock(); const atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0); if ((oldVarStatus & FORMING_SUPERPATCH) == 0) { //A sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Can't set a superpatch status if it wasn't previously marked as forming a superpatch.\n"); exit(-1); } else { //Attempt to claim forming it into a superpatch. atomicDataStatus newVarStatus = oldVarStatus; newVarStatus = newVarStatus & ~FORMING_SUPERPATCH; newVarStatus = newVarStatus | SUPERPATCH; //If we succeeded in our attempt to claim to deallocate, this returns true. //If we failed, thats a real problem, and we crash below. //printf("current status is %s oldVarStatus is %s newVarStatus is %s\n", getDisplayableStatusCodes(status) superpatched = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus); } if (!superpatched) { //Another sanity check printf("ERROR:\nGPUDataWarehouse::compareAndSwapSetSuperPatchGPU( ) Something modified the atomic status between the phases of forming a superpatch and setting a superpatch. 
This shouldn't happen\n"); exit(-1); } return superpatched; } //______________________________________________________________________ // __host__ bool GPUDataWarehouse::isSuperPatchGPU(char const* label, int patchID, int matlIndx, int levelIndx) { bool retVal = false; varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); if (varPointers->find(lpml) != varPointers->end()) { retVal = ((__sync_fetch_and_or(&(varPointers->at(lpml).var->atomicStatusInGpuMemory), 0) & SUPERPATCH) == SUPERPATCH); } varLock->unlock(); return retVal; } //______________________________________________________________________ // __host__ void GPUDataWarehouse::setSuperPatchLowAndSize(char const* const label, const int patchID, const int matlIndx, const int levelIndx, const int3& low, const int3& size){ varLock->lock(); labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx); std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml); if ( it == varPointers->end()) { printf("ERROR: GPUDataWarehouse::setSuperPatchLowAndSize - Didn't find a variable for label %s patch %d matl %d level %d\n", label, patchID, matlIndx, levelIndx); varLock->unlock(); exit(-1); } it->second.var->device_offset = low; it->second.var->device_size = size; varLock->unlock(); } //______________________________________________________________________ // __device__ void GPUDataWarehouse::print() { #ifdef __CUDA_ARCH__ __syncthreads(); if( isThread0_Blk0() ){ printf("\nVariables in GPUDataWarehouse\n"); for (int i = 0; i < d_numVarDBItems; i++) { dataItem me = d_varDB[i]; printf(" %-15s matl: %i, patchID: %i, L-%i, size:[%i,%i,%i] pointer: %p\n", me.label, me.matlIndx, me.domainID, me.levelIndx, me.var_size.x, me.var_size.y, me.var_size.z, me.var_ptr); } __syncthreads(); printThread(); printBlock(); printf("\n"); } #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::printError(const char* msg, 
const char* methodName, char const* label, const int patchID, int8_t matlIndx, int8_t levelIndx ) { #ifdef __CUDA_ARCH__ __syncthreads(); if( isThread0() ){ if (label[0] == '\0') { printf(" \nERROR GPU-side: GPUDataWarehouse::%s() - %s\n", methodName, msg ); } else { printf(" \nERROR GPU-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg); } //Should this just loop through the variable database and print out only items with a //levelIndx value greater than zero? -- Brad //for (int i = 0; i < d_numLevelItems; i++) { // printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx); // } __syncthreads(); printThread(); printBlock(); // we know this is fatal and why, so just stop kernel execution __threadfence(); asm("trap;"); } #else //__________________________________ // CPU code if (label[0] == '\0') { printf(" \nERROR host-side: GPUDataWarehouse::%s() - %s\n", methodName, msg ); } else { printf(" \nERROR host-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg); } exit(-1); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::printGetLevelError(const char* msg, char const* label, int8_t levelIndx, int8_t matlIndx) { #ifdef __CUDA_ARCH__ __syncthreads(); if( isThread0() ){ printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx); //Should this just loop through the variable database and print out only items with a //levelIndx value greater than zero? 
-- Brad __syncthreads(); printThread(); printBlock(); // we know this is fatal and why, so just stop kernel execution __threadfence(); asm("trap;"); } #else //__________________________________ // CPU code printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx); #endif } //______________________________________________________________________ // HOST_DEVICE void GPUDataWarehouse::printGetError(const char* msg, char const* label, int8_t levelIndx, const int patchID, int8_t matlIndx) { #ifdef __CUDA_ARCH__ __syncthreads(); if( isThread0() ) { printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable\n", msg, label, levelIndx, patchID, matlIndx); for (int i = 0; i < d_numVarDBItems; i++) { printf(" Available varDB labels(%i of %i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", i, d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx, d_varDB[i].domainID, d_varDB[i].levelIndx); } __syncthreads(); printThread(); printBlock(); printf("\n"); // we know this is fatal and why, so just stop kernel execution __threadfence(); asm("trap;"); } #else //__________________________________ // CPU code printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable in DW %s\n", msg, label, levelIndx, patchID, matlIndx, _internalName); for (int i = 0; i < d_numVarDBItems; i++) { printf(" Available varDB labels(%i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx, d_varDB[i].domainID, d_varDB[i].levelIndx); } #endif } //______________________________________________________________________ // __host__ void* GPUDataWarehouse::getPlacementNewBuffer() { return placementNewBuffer; } //______________________________________________________________________ // Returns true if threadID and blockID are 0. // Useful in conditional statements for limiting output. 
// __device__ bool GPUDataWarehouse::isThread0_Blk0(){ int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; bool test (blockID == 0 && threadID == 0); return test; } //______________________________________________________________________ // Returns true if threadID = 0 for this block // Useful in conditional statements for limiting output. // __device__ bool GPUDataWarehouse::isThread0(){ int threadID = threadIdx.x + threadIdx.y + threadIdx.z; bool test (threadID == 0 ); return test; } //______________________________________________________________________ // Output the threadID // __device__ void GPUDataWarehouse::printThread(){ int threadID = threadIdx.x + threadIdx.y + threadIdx.z; printf( "Thread [%i,%i,%i], ID: %i\n", threadIdx.x,threadIdx.y,threadIdx.z, threadID); } //______________________________________________________________________ // Output the blockID // __device__ void GPUDataWarehouse::printBlock(){ int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; printf( "Block [%i,%i,%i], ID: %i\n", blockIdx.x,blockIdx.y,blockIdx.z, blockID); }
f3b11ba546d2253b7b3937c5225621f189bdf4ff.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "error_helper.hpp" int main() { // error: invalid value double *a = 0, *b = 0; error_check(hipMemcpy(a, b, 10, hipMemcpyHostToDevice)); return 0; }
f3b11ba546d2253b7b3937c5225621f189bdf4ff.cu
#include <iostream> #include <cuda_runtime.h> #include "error_helper.hpp" int main() { // error: invalid value double *a = 0, *b = 0; error_check(cudaMemcpy(a, b, 10, cudaMemcpyHostToDevice)); return 0; }
f2b23679fc0a5ee975e94481a037e93e412515b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include "ocuutil/thread.h" #include "ocuequation/sol_laplaciancent1d.h" //*********************************************************************************** // Kernels (currently must be outside of namespaces) //*********************************************************************************** __global__ void Sol_Laplacian1DCentered_apply_stencil(float inv_h2, float *deriv_densitydt, float *density, int nx) { int i = threadIdx.x + __umul24(blockIdx.x, blockDim.x); i--; // note that density & deriv_densitydt are both shifted so that they point to the "0" element, even though the arrays start // at element -1. 
hence by offsetting i as above, we will get better coalescing for the cost of an added test if i>=0 if (i>=0 && i < nx) deriv_densitydt[i] = inv_h2 * (density[i-1] - 2.0f * density[i] + density[i+1]); } __global__ void Sol_Laplacian1DCentered_apply_boundary_conditions(float *density, ocu::BoundaryCondition left, ocu::BoundaryCondition right, int nx, float h) { if (left.type == ocu::BC_PERIODIC) { density[-1] = density[nx-1]; } else if (left.type == ocu::BC_DIRICHELET) { density[-1] = 2 * left.value - density[0]; } else { // (left.type == ocu::BC_NEUMANN) density[-1] = density[0] + h * left.value; } if (right.type == ocu::BC_PERIODIC) { density[nx] = density[1]; } else if (right.type == ocu::BC_DIRICHELET) { density[nx] = 2 * right.value - density[nx-1]; } else { // (right.type == ocu::BC_NEUMANN) density[nx] = density[nx-1] + h * right.value; } } namespace ocu { bool Sol_LaplacianCentered1DDevice::initialize_storage(int nx) { density.init(nx, 1); deriv_densitydt.init(nx, 1); // pad so that memory accesses will be better coalesced _nx = nx; return true; } void Sol_LaplacianCentered1DDevice::apply_boundary_conditions() { dim3 Dg(1); dim3 Db(1); hipLaunchKernelGGL(( Sol_Laplacian1DCentered_apply_boundary_conditions), dim3(Db), dim3(Db), 0, ThreadManager::get_compute_stream(), &density.at(0), left, right, nx(), h()); hipError_t er = hipGetLastError(); if (er != (unsigned int) hipSuccess) { printf("[ERROR] Sol_LaplacianCentered1DDevice::apply_boundary_conditions - CUDA error \"%s\"\n", hipGetErrorString(er)); } } bool Sol_LaplacianCentered1DDevice::solve() { // centered differencing float inv_h2 = coefficient() / (h() * h()); apply_boundary_conditions(); // launch nx+1 threads dim3 Dg((nx()+1+255) / 256); dim3 Db(256); PreKernel(); hipLaunchKernelGGL(( Sol_Laplacian1DCentered_apply_stencil), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), inv_h2, &deriv_densitydt.at(0), &density.at(0), nx()); PostKernel("Sol_LaplacianCentered1DDevice::solve"); return 
!any_error(); } bool Sol_LaplacianCentered1DDeviceNew::initialize_storage(int nx, Grid1DDeviceF *density_val) { density = density_val; if (density->nx() != nx) { printf("[ERROR] Sol_LaplacianCentered1DDeviceNew::initialize_storage - density width %d != %d\n", density->nx(), nx); return false; } deriv_densitydt.init(nx, 1); _nx = nx; return true; } bool Sol_LaplacianCentered1DDeviceNew::solve() { // centered differencing float inv_h2 = coefficient() / (h() * h()); // launch nx+1 threads dim3 Dg((nx()+1+255) / 256); dim3 Db(256); PreKernel(); hipLaunchKernelGGL(( Sol_Laplacian1DCentered_apply_stencil), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), inv_h2, &deriv_densitydt.at(0), &density->at(0), nx()); // Sol_Laplacian1DCentered_apply_stencil<<<Dg, Db>>>(inv_h2, &deriv_densitydt.at(0), &density->at(0), nx()); PostKernel("Sol_LaplacianCentered1DDevice::solve"); return !any_error(); } }
f2b23679fc0a5ee975e94481a037e93e412515b7.cu
/* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include "ocuutil/thread.h" #include "ocuequation/sol_laplaciancent1d.h" //*********************************************************************************** // Kernels (currently must be outside of namespaces) //*********************************************************************************** __global__ void Sol_Laplacian1DCentered_apply_stencil(float inv_h2, float *deriv_densitydt, float *density, int nx) { int i = threadIdx.x + __umul24(blockIdx.x, blockDim.x); i--; // note that density & deriv_densitydt are both shifted so that they point to the "0" element, even though the arrays start // at element -1. 
hence by offsetting i as above, we will get better coalescing for the cost of an added test if i>=0 if (i>=0 && i < nx) deriv_densitydt[i] = inv_h2 * (density[i-1] - 2.0f * density[i] + density[i+1]); } __global__ void Sol_Laplacian1DCentered_apply_boundary_conditions(float *density, ocu::BoundaryCondition left, ocu::BoundaryCondition right, int nx, float h) { if (left.type == ocu::BC_PERIODIC) { density[-1] = density[nx-1]; } else if (left.type == ocu::BC_DIRICHELET) { density[-1] = 2 * left.value - density[0]; } else { // (left.type == ocu::BC_NEUMANN) density[-1] = density[0] + h * left.value; } if (right.type == ocu::BC_PERIODIC) { density[nx] = density[1]; } else if (right.type == ocu::BC_DIRICHELET) { density[nx] = 2 * right.value - density[nx-1]; } else { // (right.type == ocu::BC_NEUMANN) density[nx] = density[nx-1] + h * right.value; } } namespace ocu { bool Sol_LaplacianCentered1DDevice::initialize_storage(int nx) { density.init(nx, 1); deriv_densitydt.init(nx, 1); // pad so that memory accesses will be better coalesced _nx = nx; return true; } void Sol_LaplacianCentered1DDevice::apply_boundary_conditions() { dim3 Dg(1); dim3 Db(1); Sol_Laplacian1DCentered_apply_boundary_conditions<<<Db, Db, 0, ThreadManager::get_compute_stream()>>>(&density.at(0), left, right, nx(), h()); cudaError_t er = cudaGetLastError(); if (er != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_LaplacianCentered1DDevice::apply_boundary_conditions - CUDA error \"%s\"\n", cudaGetErrorString(er)); } } bool Sol_LaplacianCentered1DDevice::solve() { // centered differencing float inv_h2 = coefficient() / (h() * h()); apply_boundary_conditions(); // launch nx+1 threads dim3 Dg((nx()+1+255) / 256); dim3 Db(256); PreKernel(); Sol_Laplacian1DCentered_apply_stencil<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(inv_h2, &deriv_densitydt.at(0), &density.at(0), nx()); PostKernel("Sol_LaplacianCentered1DDevice::solve"); return !any_error(); } bool 
Sol_LaplacianCentered1DDeviceNew::initialize_storage(int nx, Grid1DDeviceF *density_val) { density = density_val; if (density->nx() != nx) { printf("[ERROR] Sol_LaplacianCentered1DDeviceNew::initialize_storage - density width %d != %d\n", density->nx(), nx); return false; } deriv_densitydt.init(nx, 1); _nx = nx; return true; } bool Sol_LaplacianCentered1DDeviceNew::solve() { // centered differencing float inv_h2 = coefficient() / (h() * h()); // launch nx+1 threads dim3 Dg((nx()+1+255) / 256); dim3 Db(256); PreKernel(); Sol_Laplacian1DCentered_apply_stencil<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(inv_h2, &deriv_densitydt.at(0), &density->at(0), nx()); // Sol_Laplacian1DCentered_apply_stencil<<<Dg, Db>>>(inv_h2, &deriv_densitydt.at(0), &density->at(0), nx()); PostKernel("Sol_LaplacianCentered1DDevice::solve"); return !any_error(); } }
02c05b9c20ef0378cd3a412006d6d252ac0543fe.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _SUBTRACT_KERNEL_H_ #define _SUBTRACT_KERNEL_H_ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> template <typename T> __global__ void subtractKernel(T * a, T * b, T * c, int n){ int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < n){ c[index] = a[index] - b[index]; } } /* Wrapper function for subtractKernel n - array size */ template <typename T> void subtract(T * a, T * b, T * c, unsigned int n, unsigned int threadsPerBlock){ dim3 grid((int) ceil(n/(float)threadsPerBlock), 1, 1); dim3 block(threadsPerBlock, 1, 1); //copy a & b to device memory & allocate memory for d_out hipLaunchKernelGGL(( subtractKernel<T>), dim3(grid), dim3(block), 0, 0, a, b, c, n); //check if launch was successful hipError_t cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("add kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); //copy output of kernel from device memory to host memory } // Initialize templates for float, double and int template void subtract<float>(float * a, float * b, float * c, unsigned int n, unsigned int threadsPerBlock); template void subtract<double>(double * a, double * b, double * c, unsigned int n, unsigned int threadsPerBlock); template void subtract<int>(int * a, int * b, int * c, unsigned int n, unsigned int threadsPerBlock); #endif
02c05b9c20ef0378cd3a412006d6d252ac0543fe.cu
#ifndef _SUBTRACT_KERNEL_H_ #define _SUBTRACT_KERNEL_H_ #include <cuda_runtime.h> #include <cuda.h> #include <stdio.h> template <typename T> __global__ void subtractKernel(T * a, T * b, T * c, int n){ int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < n){ c[index] = a[index] - b[index]; } } /* Wrapper function for subtractKernel n - array size */ template <typename T> void subtract(T * a, T * b, T * c, unsigned int n, unsigned int threadsPerBlock){ dim3 grid((int) ceil(n/(float)threadsPerBlock), 1, 1); dim3 block(threadsPerBlock, 1, 1); //copy a & b to device memory & allocate memory for d_out subtractKernel<T><<<grid, block>>>(a, b, c, n); //check if launch was successful cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != CUDA_SUCCESS) printf("add kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); //copy output of kernel from device memory to host memory } // Initialize templates for float, double and int template void subtract<float>(float * a, float * b, float * c, unsigned int n, unsigned int threadsPerBlock); template void subtract<double>(double * a, double * b, double * c, unsigned int n, unsigned int threadsPerBlock); template void subtract<int>(int * a, int * b, int * c, unsigned int n, unsigned int threadsPerBlock); #endif
f009875e7057a03ffb4d2eaaedb521d2e72b69d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // CUDA przykad (c) Andrzej ukaszewski 2010 // Dodawanie macierzy na GPU: kompilacja: nvcc addmat.cu #include <cstdio> __global__ void AddMatrixKernel1(float *A, float *B, float *C, int N) { int adres = threadIdx.x + N * blockIdx.x; C[adres] = A[adres] + B[adres]; } void GPUMatrixAdd(float *A, float *B, float *C, int N) { int size = N*N*sizeof(float); float *Ad, *Bd, *Cd; // macierze na GPU hipMalloc(&Ad, size); hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); hipMalloc(&Bd, size); hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); hipMalloc(&Cd, size); // Wywoanie jdra np.: hipLaunchKernelGGL(( AddMatrixKernel1), dim3(N),dim3(N), 0, 0, Ad,Bd,Cd,N); // N blokw po N wtkw hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); hipFree(Ad); hipFree(Bd); hipFree(Cd); } int main() { float A[4]={ 1., 1., 1., 1. }; float B[4]={ 2., 3., 4., 5. }; float C[4]; printf("A[1]=%f\n", A[1]); printf("B[1]=%f\n", B[1]); GPUMatrixAdd(A,B,C,2); printf("C[1]=%f\n", C[1]); return 0; }
f009875e7057a03ffb4d2eaaedb521d2e72b69d9.cu
// CUDA przykład (c) Andrzej Łukaszewski 2010 // Dodawanie macierzy na GPU: kompilacja: nvcc addmat.cu #include <cstdio> __global__ void AddMatrixKernel1(float *A, float *B, float *C, int N) { int adres = threadIdx.x + N * blockIdx.x; C[adres] = A[adres] + B[adres]; } void GPUMatrixAdd(float *A, float *B, float *C, int N) { int size = N*N*sizeof(float); float *Ad, *Bd, *Cd; // macierze na GPU cudaMalloc(&Ad, size); cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice); cudaMalloc(&Bd, size); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); cudaMalloc(&Cd, size); // Wywołanie jądra np.: AddMatrixKernel1<<<N,N>>>(Ad,Bd,Cd,N); // N bloków po N wątków cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); } int main() { float A[4]={ 1., 1., 1., 1. }; float B[4]={ 2., 3., 4., 5. }; float C[4]; printf("A[1]=%f\n", A[1]); printf("B[1]=%f\n", B[1]); GPUMatrixAdd(A,B,C,2); printf("C[1]=%f\n", C[1]); return 0; }
ccaa12a28e2cd3d60ff8165c1b2a0725d24ad6d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Kernel.h" #include <stdio.h> #include <math.h> #include <float.h> //---------------------------------------------------------------------------------------------------------- __device__ void Filtering_1D_vertical_shared_dev(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel, int iKernelSize, int row, int col) { unsigned int iOffset = row * imageWidth + col; float sum = 0; int iWin = iKernelSize / 2; if (row < iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset + (k + row) * imageWidth] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset + abs(k - row) * imageWidth] * pKernel[iWin - k]; } } else if (row >= imageHeight - iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset - ((k + (imageHeight - row)) * imageWidth)] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset - (abs(k - (imageHeight - row)) * imageWidth)] * pKernel[iWin - k]; } } else { for (int k = -iWin; k <= iWin; k++) { int iNewRow = row + k; if (iNewRow >= 0 && iNewRow < imageHeight) sum += inputImageKernel[iOffset + k * imageWidth] * pKernel[k + iWin]; } } outputImagekernel[iOffset] = sum; } //---------------------------------------------------------------------------------------------------------- __device__ void Filtering_1D_horizontal_shared_dev(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel, int iKernelSize, int row, int col) { unsigned int iOffset = row * imageWidth + col; float sum = 0; int iWin = iKernelSize / 2; if (col < iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset + (k + col)] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k 
<= iWin; k++) { sum += inputImageKernel[iOffset + abs(k - col)] * pKernel[iWin - k]; } } else if (col >= imageWidth - iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset - ((k + (imageWidth - col)))] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset - (abs(k - (imageWidth - col)))] * pKernel[iWin - k]; } } else { for (int k = -iWin; k <= iWin; k++) { int iNewCol = col + k; if (iNewCol >= 0 && iNewCol < imageWidth) sum += inputImageKernel[iOffset + k] * pKernel[k + iWin]; } } outputImagekernel[iOffset] = sum; } //------------------------------------------------------------------- __global__ void Filtering_1D_shared_Kernel(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel_g, int iKernelSize, int dir) { // Set row and colum for thread. int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float pKernel[111]; //up to win=5 for (int i = 0; i < iKernelSize; i++) pKernel[i] = pKernel_g[i]; __syncthreads(); //make sure all threads have loaded the kernel in the shared mem if (dir == 0) //horizontal only { Filtering_1D_horizontal_shared_dev(inputImageKernel, outputImagekernel, imageWidth, imageHeight, pKernel, iKernelSize, row, col); } else { Filtering_1D_vertical_shared_dev(inputImageKernel, outputImagekernel, imageWidth, imageHeight, pKernel, iKernelSize, row, col); } } //-------------------------------------------------------------------------------- hipError_t Filtering_1D_shared_Cuda(float *pInData, float *pOutData, int iWidth, int iHeight, float *pKernel, int iKernelSize, int dir) { float *d_InData = 0; float *d_OutData = 0; float *d_pKernel = 0; int iFrameSize = iWidth * iHeight; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. 
//cudaStatus = hipSetDevice(0); //if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; //} // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&d_InData, iFrameSize * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&d_OutData, iFrameSize * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&d_pKernel, iKernelSize * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(d_InData, pInData, iFrameSize * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } //copy kernel cudaStatus = hipMemcpy(d_pKernel, pKernel, iKernelSize * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } int TILE_SIZE_X = 16; int TILE_SIZE_Y = 16; dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y); dim3 dimGrid((int)ceil((float)iWidth / (float)TILE_SIZE_X), (int)ceil((float)iHeight / (float)TILE_SIZE_Y)); // Launch a kernel on the GPU with one thread for each element. Filtering_1D_shared_Kernel << <dimGrid, dimBlock >> > (d_InData, d_OutData, iWidth, iHeight, d_pKernel, iKernelSize, dir); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(pOutData, d_OutData, iFrameSize * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(d_InData); hipFree(d_OutData); hipFree(d_pKernel); return cudaStatus; }
ccaa12a28e2cd3d60ff8165c1b2a0725d24ad6d4.cu
#include "Kernel.h" #include <stdio.h> #include <math.h> #include <float.h> //---------------------------------------------------------------------------------------------------------- __device__ void Filtering_1D_vertical_shared_dev(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel, int iKernelSize, int row, int col) { unsigned int iOffset = row * imageWidth + col; float sum = 0; int iWin = iKernelSize / 2; if (row < iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset + (k + row) * imageWidth] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset + abs(k - row) * imageWidth] * pKernel[iWin - k]; } } else if (row >= imageHeight - iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset - ((k + (imageHeight - row)) * imageWidth)] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset - (abs(k - (imageHeight - row)) * imageWidth)] * pKernel[iWin - k]; } } else { for (int k = -iWin; k <= iWin; k++) { int iNewRow = row + k; if (iNewRow >= 0 && iNewRow < imageHeight) sum += inputImageKernel[iOffset + k * imageWidth] * pKernel[k + iWin]; } } outputImagekernel[iOffset] = sum; } //---------------------------------------------------------------------------------------------------------- __device__ void Filtering_1D_horizontal_shared_dev(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel, int iKernelSize, int row, int col) { unsigned int iOffset = row * imageWidth + col; float sum = 0; int iWin = iKernelSize / 2; if (col < iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset + (k + col)] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset + abs(k - col)] * pKernel[iWin - k]; } } 
else if (col >= imageWidth - iWin) { for (int k = 0; k <= iWin; k++) { sum += inputImageKernel[iOffset - ((k + (imageWidth - col)))] * pKernel[k + iWin]; //reverese of data with the rest of the kernel } for (int k = 1; k <= iWin; k++) { sum += inputImageKernel[iOffset - (abs(k - (imageWidth - col)))] * pKernel[iWin - k]; } } else { for (int k = -iWin; k <= iWin; k++) { int iNewCol = col + k; if (iNewCol >= 0 && iNewCol < imageWidth) sum += inputImageKernel[iOffset + k] * pKernel[k + iWin]; } } outputImagekernel[iOffset] = sum; } //------------------------------------------------------------------- __global__ void Filtering_1D_shared_Kernel(float *inputImageKernel, float *outputImagekernel, int imageWidth, int imageHeight, float *pKernel_g, int iKernelSize, int dir) { // Set row and colum for thread. int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float pKernel[111]; //up to win=5 for (int i = 0; i < iKernelSize; i++) pKernel[i] = pKernel_g[i]; __syncthreads(); //make sure all threads have loaded the kernel in the shared mem if (dir == 0) //horizontal only { Filtering_1D_horizontal_shared_dev(inputImageKernel, outputImagekernel, imageWidth, imageHeight, pKernel, iKernelSize, row, col); } else { Filtering_1D_vertical_shared_dev(inputImageKernel, outputImagekernel, imageWidth, imageHeight, pKernel, iKernelSize, row, col); } } //-------------------------------------------------------------------------------- cudaError_t Filtering_1D_shared_Cuda(float *pInData, float *pOutData, int iWidth, int iHeight, float *pKernel, int iKernelSize, int dir) { float *d_InData = 0; float *d_OutData = 0; float *d_pKernel = 0; int iFrameSize = iWidth * iHeight; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. //cudaStatus = cudaSetDevice(0); //if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); // goto Error; //} // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&d_InData, iFrameSize * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&d_OutData, iFrameSize * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&d_pKernel, iKernelSize * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(d_InData, pInData, iFrameSize * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } //copy kernel cudaStatus = cudaMemcpy(d_pKernel, pKernel, iKernelSize * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } int TILE_SIZE_X = 16; int TILE_SIZE_Y = 16; dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y); dim3 dimGrid((int)ceil((float)iWidth / (float)TILE_SIZE_X), (int)ceil((float)iHeight / (float)TILE_SIZE_Y)); // Launch a kernel on the GPU with one thread for each element. Filtering_1D_shared_Kernel << <dimGrid, dimBlock >> > (d_InData, d_OutData, iWidth, iHeight, d_pKernel, iKernelSize, dir); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(pOutData, d_OutData, iFrameSize * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(d_InData); cudaFree(d_OutData); cudaFree(d_pKernel); return cudaStatus; }
387f60955e57aa57db9c149d3b37f21b45ed6123.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_code.cuh" #include "../../third_party/others/OpenGlCudaHelper.h" #include <algorithm> __global__ void gol_step ( const ubyte_ptr m_data, ubyte_ptr m_data_out, const unsigned matrix_height, const unsigned matrix_width ) { unsigned n_size = matrix_height * matrix_width; for (unsigned cell_id = blockIdx.x * blockDim.x + threadIdx.x; cell_id < n_size; cell_id += blockDim.x * gridDim.x) { uint x1 = cell_id % matrix_width; uint y1 = cell_id - x1; // position 0 to matrix y1 as array uint x0 = (x1 + matrix_width - 1) % matrix_width; // left uint x2 = (x1 + 1) % matrix_width; // right uint y0 = (y1 + n_size - matrix_width) % n_size; // up uint y2 = (y1 + matrix_width) % n_size; // down // Count alive cells. uint alive_cells = m_data[x0 + y0] + m_data[x1 + y0] + m_data[x2 + y0] + m_data[x0 + y1] + 0 + m_data[x2 + y1] + m_data[x0 + y2] + m_data[x1 + y2] + m_data[x2 + y2]; m_data_out[x1 + y1] = ( alive_cells == 3 || alive_cells == 6 || (alive_cells == 2 && m_data[x1 + y1]) ) ? 1 : 0; } } bool gol_iterate ( ubyte_ptr& m_data, ubyte_ptr& m_data_out, const size_t matrix_height, const size_t matrix_width, const size_t iterations, const ushort threads ) { //if ((matrix_height * matrix_width) % threads != 0) // return false; size_t required_blocks = (matrix_height * matrix_width) / threads; ushort blocks = (ushort) ::min((size_t)32768, required_blocks); for (size_t i = 0; i < iterations; ++i) { hipLaunchKernelGGL(( gol_step), dim3(blocks), dim3(threads), 0, 0, m_data, m_data_out, uint(matrix_height), uint(matrix_width)); std::swap(m_data, m_data_out); } mf::checkCudaErrors(hipDeviceSynchronize()); return true; }
387f60955e57aa57db9c149d3b37f21b45ed6123.cu
#include "cuda_code.cuh" #include "../../third_party/others/OpenGlCudaHelper.h" #include <algorithm> __global__ void gol_step ( const ubyte_ptr m_data, ubyte_ptr m_data_out, const unsigned matrix_height, const unsigned matrix_width ) { unsigned n_size = matrix_height * matrix_width; for (unsigned cell_id = blockIdx.x * blockDim.x + threadIdx.x; cell_id < n_size; cell_id += blockDim.x * gridDim.x) { uint x1 = cell_id % matrix_width; uint y1 = cell_id - x1; // position 0 to matrix y1 as array uint x0 = (x1 + matrix_width - 1) % matrix_width; // left uint x2 = (x1 + 1) % matrix_width; // right uint y0 = (y1 + n_size - matrix_width) % n_size; // up uint y2 = (y1 + matrix_width) % n_size; // down // Count alive cells. uint alive_cells = m_data[x0 + y0] + m_data[x1 + y0] + m_data[x2 + y0] + m_data[x0 + y1] + 0 + m_data[x2 + y1] + m_data[x0 + y2] + m_data[x1 + y2] + m_data[x2 + y2]; m_data_out[x1 + y1] = ( alive_cells == 3 || alive_cells == 6 || (alive_cells == 2 && m_data[x1 + y1]) ) ? 1 : 0; } } bool gol_iterate ( ubyte_ptr& m_data, ubyte_ptr& m_data_out, const size_t matrix_height, const size_t matrix_width, const size_t iterations, const ushort threads ) { //if ((matrix_height * matrix_width) % threads != 0) // return false; size_t required_blocks = (matrix_height * matrix_width) / threads; ushort blocks = (ushort) std::min((size_t)32768, required_blocks); for (size_t i = 0; i < iterations; ++i) { gol_step<<<blocks, threads>>> (m_data, m_data_out, uint(matrix_height), uint(matrix_width)); std::swap(m_data, m_data_out); } mf::checkCudaErrors(cudaDeviceSynchronize()); return true; }
d79028da1f80ea4e680021d1b81fbf901a8ed6e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <stdlib.h> //for rand(),malloc(),free() const int WIDTH = 1024; // total width is 1024*1024 const int TILE_WIDTH = 32; //block will be(TILE_WIDTH,TILE_WIDTH) constexpr int GRID_WIDTH = 128;//(WIDTH / TILE_WIDTH); //grid will be (GRID_WIDTH,GRID_WIDTH) __global__ void matmaul(float*c, const float*a, const float*b, const int width){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0F; for(register int k = 0; k < width; ++k){ float lhs = a[y * width + k]; float rhs = b[k * width + x]; sum += lhs * rhs; } c[y * width + x] = sum; } void genData(float* ptr, unsigned int size){ for(int i = 0 ; i < size; i++){ *ptr++ =(float)(rand()%1000) / 1000.0F; } } int main(void){ printf("23"); float** a = new float*[WIDTH]; float** b = new float*[WIDTH]; float** c = new float*[WIDTH]; printf("23"); for(int i = 0; i < WIDTH; ++i){ a[i] = new float[WIDTH]; b[i] = new float[WIDTH]; c[i] = new float[WIDTH]; } printf("23"); //generate source data genData(&(a[0][0]),WIDTH * WIDTH); genData(&(b[0][0]),WIDTH * WIDTH); //device-side data float* dev_a = 0; float* dev_b = 0; float* dev_c = 0; printf("23"); //allocate device memory hipMalloc((void**)&dev_a,WIDTH*WIDTH*sizeof(float)); hipMalloc((void**)&dev_b,WIDTH*WIDTH*sizeof(float)); hipMalloc((void**)&dev_c,WIDTH*WIDTH*sizeof(float)); //copy from host to device hipMemcpy(dev_a, a, WIDTH*WIDTH*sizeof(float), hipMemcpyHostToDevice); // dev_a = a; hipMemcpy(dev_b, b, WIDTH*WIDTH*sizeof(float), hipMemcpyHostToDevice); // dev_a = a; //CUDA:launch the kernel dim3 dimGrid(GRID_WIDTH, GRID_WIDTH, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); hipLaunchKernelGGL(( matmaul), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_c,dev_a,dev_b,WIDTH); // copy from device to host hipMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(float),hipMemcpyDeviceToHost); // c = dev_c; //free device memory 
hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); //print the result for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; ++x){ printf("%5f ", c[y][x]); } printf("\n"); } return 0; }
d79028da1f80ea4e680021d1b81fbf901a8ed6e6.cu
#include <cstdio> #include <stdlib.h> //for rand(),malloc(),free() const int WIDTH = 1024; // total width is 1024*1024 const int TILE_WIDTH = 32; //block will be(TILE_WIDTH,TILE_WIDTH) constexpr int GRID_WIDTH = 128;//(WIDTH / TILE_WIDTH); //grid will be (GRID_WIDTH,GRID_WIDTH) __global__ void matmaul(float*c, const float*a, const float*b, const int width){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0F; for(register int k = 0; k < width; ++k){ float lhs = a[y * width + k]; float rhs = b[k * width + x]; sum += lhs * rhs; } c[y * width + x] = sum; } void genData(float* ptr, unsigned int size){ for(int i = 0 ; i < size; i++){ *ptr++ =(float)(rand()%1000) / 1000.0F; } } int main(void){ printf("23"); float** a = new float*[WIDTH]; float** b = new float*[WIDTH]; float** c = new float*[WIDTH]; printf("23"); for(int i = 0; i < WIDTH; ++i){ a[i] = new float[WIDTH]; b[i] = new float[WIDTH]; c[i] = new float[WIDTH]; } printf("23"); //generate source data genData(&(a[0][0]),WIDTH * WIDTH); genData(&(b[0][0]),WIDTH * WIDTH); //device-side data float* dev_a = 0; float* dev_b = 0; float* dev_c = 0; printf("23"); //allocate device memory cudaMalloc((void**)&dev_a,WIDTH*WIDTH*sizeof(float)); cudaMalloc((void**)&dev_b,WIDTH*WIDTH*sizeof(float)); cudaMalloc((void**)&dev_c,WIDTH*WIDTH*sizeof(float)); //copy from host to device cudaMemcpy(dev_a, a, WIDTH*WIDTH*sizeof(float), cudaMemcpyHostToDevice); // dev_a = a; cudaMemcpy(dev_b, b, WIDTH*WIDTH*sizeof(float), cudaMemcpyHostToDevice); // dev_a = a; //CUDA:launch the kernel dim3 dimGrid(GRID_WIDTH, GRID_WIDTH, 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); matmaul<<<dimGrid,dimBlock>>>(dev_c,dev_a,dev_b,WIDTH); // copy from device to host cudaMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(float),cudaMemcpyDeviceToHost); // c = dev_c; //free device memory cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); //print the result for(int y = 0; y < WIDTH; ++y){ for(int x = 0; x < WIDTH; 
++x){ printf("%5f ", c[y][x]); } printf("\n"); } return 0; }
9aa9bde1c7abbf19127d0ec1bf6a8c46ee05d042.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "shared.h" #include "block_size.h" void cuda_last_error_check (const char *message); // Add rows kernel & related operations __global__ void add_rows_gpu_kernel(float* mat, float* out, int n, int m); void add_rows_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer); // Add columns kernel & related operations __global__ void add_cols_gpu_kernel(float* mat, float* out, int n, int m); void add_columns_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer); // Reduce vector kernel & related operations __global__ void reduce_vector_gpu_kernel(float* vec, float* result, int n); void reduce_vector_gpu(float* vec, float* result, int n, struct Timer* timer); extern struct Options options; // Global config var void perform_gpu_operations(float* mat1d, struct Stats* stats) { int n = options.rows; int m = options.cols; float* rowsum = (float*) malloc(n*sizeof(float)); add_rows_gpu(rowsum, mat1d, n, m, &(stats->add_rows)); float* colsum = (float*) malloc(n*sizeof(float)); add_columns_gpu(colsum, mat1d, n, m, &(stats->add_columns)); float rowsum_reduced; reduce_vector_gpu(rowsum, &rowsum_reduced, n, &(stats->reduce_vector_rows)); float colsum_reduced; reduce_vector_gpu(colsum, &colsum_reduced, m, &(stats->reduce_vector_cols)); print_compute_results((char*) "GPU Results:", rowsum, colsum, rowsum_reduced, colsum_reduced, n, m); // Free memory free(rowsum); free(colsum); } void add_rows_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(BLOCK_SIZE, 1); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* mat1d_GPU; float* rowsum_GPU; hipMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); hipMalloc((void**) &rowsum_GPU, n*sizeof(float)); // Host->Device copy hipMemcpy(mat1d_GPU, mat1d, n*m*sizeof(float), 
hipMemcpyHostToDevice); // Device: execution + timing start_timer(timer); hipLaunchKernelGGL(( add_rows_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, mat1d_GPU, rowsum_GPU, n, m); end_timer(timer); cuda_last_error_check("add_rows_gpu"); // Device->Host copy hipMemcpy(rowsum, rowsum_GPU, n*sizeof(float), hipMemcpyDeviceToHost); hipFree(mat1d_GPU); hipFree(rowsum_GPU); } void add_columns_gpu(float* colsum, float* mat1d, int n, int m, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(1, BLOCK_SIZE); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* mat1d_GPU; float* colsum_GPU; hipMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); hipMalloc((void**) &colsum_GPU, m*sizeof(float)); // Host->Device copy hipMemcpy(mat1d_GPU, mat1d, n*m*sizeof(float), hipMemcpyHostToDevice); // Device: execution + timing start_timer(timer); hipLaunchKernelGGL(( add_cols_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, mat1d_GPU, colsum_GPU, n, m); end_timer(timer); cuda_last_error_check("add_columns_gpu"); // Device->Host copy hipMemcpy(colsum, colsum_GPU, m*sizeof(float), hipMemcpyDeviceToHost); hipFree(mat1d_GPU); hipFree(colsum_GPU); } void reduce_vector_gpu(float* vec, float* result, int n, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(1, 1); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* vec_GPU; float* result_GPU; hipMalloc((void**) &vec_GPU, n*sizeof(float)); hipMalloc((void**) &result_GPU, sizeof(float)); // Host->Device copy hipMemcpy(vec_GPU, vec, n*sizeof(float), hipMemcpyHostToDevice); // Device: execution + timing start_timer(timer); hipLaunchKernelGGL(( reduce_vector_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, vec_GPU, result_GPU, n); end_timer(timer); cuda_last_error_check("reduce_vector_gpu"); // Device->Host copy hipMemcpy(vec, vec_GPU, n*sizeof(float), hipMemcpyDeviceToHost); 
hipMemcpy(result, result_GPU, sizeof(float), hipMemcpyDeviceToHost); hipFree(result_GPU); } // Kernels __global__ void add_rows_gpu_kernel(float* mat, float* out, int n, int m) { int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = threadIdx.y; if (x < n && y == 0) { // Only 0th thread in the y dimension is used out[x] = 0; for (int i = 0; i < m; i++) { out[x] += mat[i+(x*m)]; } } } __global__ void add_cols_gpu_kernel(float* mat, float* out, int n, int m) { int x = threadIdx.x; int y = blockIdx.x * BLOCK_SIZE + threadIdx.y; if (y < m && x == 0) { // Only 0th thread in the x dimension is used out[y] = 0; for (int i = 0; i < n; i++) { out[y] += mat[(i*m)+y]; } } } __global__ void reduce_vector_gpu_kernel(float* vec, float* result, int n) { int x = threadIdx.x; int y = threadIdx.y; if (x == 0 && y == 0) { // Only 1 thread used *result = 0; for (int i = 0; i < n; i++) { *result += vec[i]; } } } // Cuda error check util void cuda_last_error_check (const char *message) { hipError_t err = hipGetLastError(); if(hipSuccess != err) { printf("[CUDA] [ERROR] %s: %s\n", message, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
9aa9bde1c7abbf19127d0ec1bf6a8c46ee05d042.cu
#include <stdio.h> #include "shared.h" #include "block_size.h" void cuda_last_error_check (const char *message); // Add rows kernel & related operations __global__ void add_rows_gpu_kernel(float* mat, float* out, int n, int m); void add_rows_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer); // Add columns kernel & related operations __global__ void add_cols_gpu_kernel(float* mat, float* out, int n, int m); void add_columns_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer); // Reduce vector kernel & related operations __global__ void reduce_vector_gpu_kernel(float* vec, float* result, int n); void reduce_vector_gpu(float* vec, float* result, int n, struct Timer* timer); extern struct Options options; // Global config var void perform_gpu_operations(float* mat1d, struct Stats* stats) { int n = options.rows; int m = options.cols; float* rowsum = (float*) malloc(n*sizeof(float)); add_rows_gpu(rowsum, mat1d, n, m, &(stats->add_rows)); float* colsum = (float*) malloc(n*sizeof(float)); add_columns_gpu(colsum, mat1d, n, m, &(stats->add_columns)); float rowsum_reduced; reduce_vector_gpu(rowsum, &rowsum_reduced, n, &(stats->reduce_vector_rows)); float colsum_reduced; reduce_vector_gpu(colsum, &colsum_reduced, m, &(stats->reduce_vector_cols)); print_compute_results((char*) "GPU Results:", rowsum, colsum, rowsum_reduced, colsum_reduced, n, m); // Free memory free(rowsum); free(colsum); } void add_rows_gpu(float* rowsum, float* mat1d, int n, int m, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(BLOCK_SIZE, 1); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* mat1d_GPU; float* rowsum_GPU; cudaMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); cudaMalloc((void**) &rowsum_GPU, n*sizeof(float)); // Host->Device copy cudaMemcpy(mat1d_GPU, mat1d, n*m*sizeof(float), cudaMemcpyHostToDevice); // Device: execution + timing start_timer(timer); 
add_rows_gpu_kernel<<<dimGrid, dimBlock>>>(mat1d_GPU, rowsum_GPU, n, m); end_timer(timer); cuda_last_error_check("add_rows_gpu"); // Device->Host copy cudaMemcpy(rowsum, rowsum_GPU, n*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(mat1d_GPU); cudaFree(rowsum_GPU); } void add_columns_gpu(float* colsum, float* mat1d, int n, int m, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(1, BLOCK_SIZE); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* mat1d_GPU; float* colsum_GPU; cudaMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); cudaMalloc((void**) &colsum_GPU, m*sizeof(float)); // Host->Device copy cudaMemcpy(mat1d_GPU, mat1d, n*m*sizeof(float), cudaMemcpyHostToDevice); // Device: execution + timing start_timer(timer); add_cols_gpu_kernel<<<dimGrid, dimBlock>>>(mat1d_GPU, colsum_GPU, n, m); end_timer(timer); cuda_last_error_check("add_columns_gpu"); // Device->Host copy cudaMemcpy(colsum, colsum_GPU, m*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(mat1d_GPU); cudaFree(colsum_GPU); } void reduce_vector_gpu(float* vec, float* result, int n, struct Timer* timer) { // Compute execution GPU config dim3 dimBlock(1, 1); int blocks_in_grid = (int) ceil((double) n / BLOCK_SIZE); dim3 dimGrid(blocks_in_grid, 1); // Device: alloc float* vec_GPU; float* result_GPU; cudaMalloc((void**) &vec_GPU, n*sizeof(float)); cudaMalloc((void**) &result_GPU, sizeof(float)); // Host->Device copy cudaMemcpy(vec_GPU, vec, n*sizeof(float), cudaMemcpyHostToDevice); // Device: execution + timing start_timer(timer); reduce_vector_gpu_kernel<<<dimGrid, dimBlock>>>(vec_GPU, result_GPU, n); end_timer(timer); cuda_last_error_check("reduce_vector_gpu"); // Device->Host copy cudaMemcpy(vec, vec_GPU, n*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(result, result_GPU, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(result_GPU); } // Kernels __global__ void add_rows_gpu_kernel(float* mat, float* out, int n, int m) { 
int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = threadIdx.y; if (x < n && y == 0) { // Only 0th thread in the y dimension is used out[x] = 0; for (int i = 0; i < m; i++) { out[x] += mat[i+(x*m)]; } } } __global__ void add_cols_gpu_kernel(float* mat, float* out, int n, int m) { int x = threadIdx.x; int y = blockIdx.x * BLOCK_SIZE + threadIdx.y; if (y < m && x == 0) { // Only 0th thread in the x dimension is used out[y] = 0; for (int i = 0; i < n; i++) { out[y] += mat[(i*m)+y]; } } } __global__ void reduce_vector_gpu_kernel(float* vec, float* result, int n) { int x = threadIdx.x; int y = threadIdx.y; if (x == 0 && y == 0) { // Only 1 thread used *result = 0; for (int i = 0; i < n; i++) { *result += vec[i]; } } } // Cuda error check util void cuda_last_error_check (const char *message) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { printf("[CUDA] [ERROR] %s: %s\n", message, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
cd9818ab2581e118083bc99024249661b35dae03.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*========================================================================== SHA1 KERNEL * Copyright (c) 2008, NetSysLab at the University of British Columbia * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. DESCRIPTION CPU version of the storeGPU library. 
==========================================================================*/ /*========================================================================== INCLUDES ==========================================================================*/ #include <string.h> #include <stdio.h> #include "cust.h" /*========================================================================== DATA DECLARATIONS ==========================================================================*/ /*-------------------------------------------------------------------------- TYPE DEFINITIONS --------------------------------------------------------------------------*/ typedef struct { unsigned long total[2]; /*!< number of bytes processed */ unsigned long state[5]; /*!< intermediate digest state */ unsigned char buffer[64]; /*!< data block being processed */ } sha1_context; /*-------------------------------------------------------------------------- FUNCTION PROTOTYPES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- CONSTANTS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- GLOBAL VARIABLES --------------------------------------------------------------------------*/ __device__ static const unsigned char sha1_padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /*-------------------------------------------------------------------------- MACROS --------------------------------------------------------------------------*/ #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE 1 #endif /* * 32-bit integer manipulation macros (big endian) */ #ifndef GET_UINT32_BE #define GET_UINT32_BE(n,b,i) \ { \ (n) = ( (unsigned long) (b)[(i) ] 
<< 24 ) \ | ( (unsigned long) (b)[(i) + 1] << 16 ) \ | ( (unsigned long) (b)[(i) + 2] << 8 ) \ | ( (unsigned long) (b)[(i) + 3] ); \ } #endif #ifndef PUT_UINT32_BE #define PUT_UINT32_BE(n,b,i) \ { \ (b)[(i) ] = (unsigned char) ( (n) >> 24 ); \ (b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \ (b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \ (b)[(i) + 3] = (unsigned char) ( (n) ); \ } #endif #ifdef FEATURE_SHARED_MEMORY // current thread stride. #undef SHARED_MEMORY_INDEX #define SHARED_MEMORY_INDEX(index) (32 * (index) + (threadIdx.x & 0x1F)) #endif /* FEATURE_SHARED_MEMORY */ /*-------------------------------------------------------------------------- LOCAL FUNCTIONS --------------------------------------------------------------------------*/ #ifndef FEATURE_SHARED_MEMORY /* * SHA-1 context setup */ /*=========================================================================== FUNCTION SHA1_GPU_STARTS DESCRIPTION SHA-1 context setup DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_starts( sha1_context *ctx ) { ctx->total[0] = 0; ctx->total[1] = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xEFCDAB89; ctx->state[2] = 0x98BADCFE; ctx->state[3] = 0x10325476; ctx->state[4] = 0xC3D2E1F0; } /*=========================================================================== FUNCTION SHA1_GPU_PROCESS DESCRIPTION SHA1 process buffer DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_process( sha1_context *ctx, unsigned char data[64] ) { unsigned long temp, W[16], A, B, C, D, E; GET_UINT32_BE( W[ 0], data, 0 ); GET_UINT32_BE( W[ 1], data, 4 ); GET_UINT32_BE( W[ 2], data, 8 ); GET_UINT32_BE( W[ 3], data, 12 ); GET_UINT32_BE( W[ 4], data, 16 ); GET_UINT32_BE( W[ 5], data, 20 ); GET_UINT32_BE( W[ 6], data, 24 ); GET_UINT32_BE( W[ 7], data, 28 ); GET_UINT32_BE( W[ 8], data, 32 ); GET_UINT32_BE( W[ 9], data, 36 ); 
GET_UINT32_BE( W[10], data, 40 ); GET_UINT32_BE( W[11], data, 44 ); GET_UINT32_BE( W[12], data, 48 ); GET_UINT32_BE( W[13], data, 52 ); GET_UINT32_BE( W[14], data, 56 ); GET_UINT32_BE( W[15], data, 60 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ \ W[(t - 14) & 0x0F] ^ W[ t & 0x0F], \ ( W[t & 0x0F] = S(temp,1) ) \ ) #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = ctx->state[0]; B = ctx->state[1]; C = ctx->state[2]; D = ctx->state[3]; E = ctx->state[4]; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, W[0] ); P( E, A, B, C, D, W[1] ); P( D, E, A, B, C, W[2] ); P( C, D, E, A, B, W[3] ); P( B, C, D, E, A, W[4] ); P( A, B, C, D, E, W[5] ); P( E, A, B, C, D, W[6] ); P( D, E, A, B, C, W[7] ); P( C, D, E, A, B, W[8] ); P( B, C, D, E, A, W[9] ); P( A, B, C, D, E, W[10] ); P( E, A, B, C, D, W[11] ); P( D, E, A, B, C, W[12] ); P( C, D, E, A, B, W[13] ); P( B, C, D, E, A, W[14] ); P( A, B, C, D, E, W[15] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, 
E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F ctx->state[0] += A; ctx->state[1] += B; ctx->state[2] += C; ctx->state[3] += D; ctx->state[4] += E; } /*=========================================================================== FUNCTION SHA1_CPU_UPDATE DESCRIPTION SHA1 update buffer DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_update( sha1_context *ctx, unsigned char *input, int ilen ) { int fill; unsigned long left; if( ilen <= 0 ) return; left = ctx->total[0] & 0x3F; fill = 64 - left; ctx->total[0] += ilen; ctx->total[0] &= 0xFFFFFFFF; if ( ctx->total[0] < (unsigned long) ilen ) ctx->total[1]++; if ( left && ilen >= fill ) { /*memcpy( (void *) (ctx->buffer + left), (void *) input, fill );*/ for (int i = 0; i < fill; i++) { ctx->buffer[i+left] = input[i]; } sha1_process( ctx, ctx->buffer ); input += fill; ilen -= fill; left = 0; } while ( ilen >= 64 ) { sha1_process( ctx, input ); input += 64; 
ilen -= 64; } if ( ilen > 0 ) { /*memcpy( (void *) (ctx->buffer + left), (void *) input, ilen );*/ for (int i = 0; i < ilen; i++) { ctx->buffer[i+left] = input[i]; } } } /*=========================================================================== FUNCTION SHA1_CPU_FINISH DESCRIPTION SHA1 final digest DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_finish( sha1_context *ctx, unsigned char *output ) { unsigned long last, padn; unsigned long high, low; unsigned char msglen[8]; high = ( ctx->total[0] >> 29 ) | ( ctx->total[1] << 3 ); low = ( ctx->total[0] << 3 ); PUT_UINT32_BE( high, msglen, 0 ); PUT_UINT32_BE( low, msglen, 4 ); last = ctx->total[0] & 0x3F; padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last ); sha1_update( ctx, (unsigned char *) sha1_padding, padn ); sha1_update( ctx, msglen, 8 ); PUT_UINT32_BE( ctx->state[0], output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( ctx->state[1], output, 4 ); PUT_UINT32_BE( ctx->state[2], output, 8 ); PUT_UINT32_BE( ctx->state[3], output, 12 ); PUT_UINT32_BE( ctx->state[4], output, 16 ); #endif } /*=========================================================================== FUNCTION SHA1_INTERNAL DESCRIPTION Does the real sha1 algorithm DEPENDENCIES None RETURN VALUE output is the hash result ===========================================================================*/ __device__ void sha1_internal( unsigned char *input, int ilen, unsigned char *output ) { sha1_context ctx; sha1_starts( &ctx ); sha1_update( &ctx, input, ilen ); sha1_finish( &ctx, output ); memset( &ctx, 0, sizeof( sha1_context ) ); } #endif #ifdef FEATURE_SHARED_MEMORY /*=========================================================================== FUNCTION SHA1_INTERNAL DESCRIPTION Does the real sha1 algorithm. 
DEPENDENCIES None RETURN VALUE output is the hash result ===========================================================================*/ __device__ unsigned long macroRFunction(int t, unsigned int *sharedMemory) { return sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)]; } __device__ static void sha1_internal( unsigned int *input, unsigned int *sharedMemory, unsigned int chunkSize, unsigned char *output ) { /* Number of passes (512 bit blocks) we have to do */ int numberOfPasses = chunkSize / 64 + 1; /* Used during the hashing process */ unsigned long temp, A, B, C, D ,E; //unsigned long shared14, shared15; /* Needed to do the little endian stuff */ unsigned char *data = (unsigned char *)sharedMemory; /* Will hold the hash value through the intermediate stages of SHA1 algorithm */ unsigned int state0 = 0x67452301; unsigned int state1 = 0xEFCDAB89; unsigned int state2 = 0x98BADCFE; unsigned int state3 = 0x10325476; unsigned int state4 = 0xC3D2E1F0; /* int x0 = SHARED_MEMORY_INDEX(0); int x1 = SHARED_MEMORY_INDEX(1); int x2 = SHARED_MEMORY_INDEX(2); int x3 = SHARED_MEMORY_INDEX(3); int x4 = SHARED_MEMORY_INDEX(4); int x5 = SHARED_MEMORY_INDEX(5); int x6 = SHARED_MEMORY_INDEX(6); int x7 = SHARED_MEMORY_INDEX(7); int x8 = SHARED_MEMORY_INDEX(8); int x9 = SHARED_MEMORY_INDEX(9); int x10 = SHARED_MEMORY_INDEX(10); int x11 = SHARED_MEMORY_INDEX(11); int x12 = SHARED_MEMORY_INDEX(12); int x13 = SHARED_MEMORY_INDEX(13); int x14 = SHARED_MEMORY_INDEX(14); int x15 = SHARED_MEMORY_INDEX(15); */ #undef GET_CACHED_INDEX #define GET_CACHED_INDEX(index) SHARED_MEMORY_INDEX(index)//(x##index) for( int index = 0 ; index < (numberOfPasses) ; index++ ) { /* Move data to the thread's shared memory space */ sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index]; sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index]; 
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index]; sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index]; sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index]; sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index]; sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index]; sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index]; sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index]; sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index]; sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index]; sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index]; sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index]; /* Testing the code with and without this if statement shows that it has no effect on performance. */ if(index == numberOfPasses -1 ) { /* The last pass will contain the size of the chunk size (according to official SHA1 algorithm). */ sharedMemory[GET_CACHED_INDEX(13)] = 0x00000080; PUT_UINT32_BE( chunkSize >> 29, data, GET_CACHED_INDEX(14) * 4 ); PUT_UINT32_BE( chunkSize << 3, data, GET_CACHED_INDEX(15) * 4 ); } else { sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index]; sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index]; sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index]; } /* Get the little endian stuff done. 
*/ GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)], data, GET_CACHED_INDEX(0) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)], data, GET_CACHED_INDEX(1) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)], data, GET_CACHED_INDEX(2) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)], data, GET_CACHED_INDEX(3) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)], data, GET_CACHED_INDEX(4) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)], data, GET_CACHED_INDEX(5) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)], data, GET_CACHED_INDEX(6) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)], data, GET_CACHED_INDEX(7) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)], data, GET_CACHED_INDEX(8) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)], data, GET_CACHED_INDEX(9) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)], data, GET_CACHED_INDEX(10) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)], data, GET_CACHED_INDEX(11) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)], data, GET_CACHED_INDEX(12) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)], data, GET_CACHED_INDEX(13) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)], data, GET_CACHED_INDEX(14) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)], data, GET_CACHED_INDEX(15) * 4 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = macroRFunction(t, sharedMemory) , \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) /* #define R(t) \ ( \ temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) */ #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = state0; B = state1; C = state2; D = state3; E = 
state4; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, 
R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F state0 += A; state1 += B; state2 += C; state3 += D; state4 += E; } /* Got the hash, store it in the output buffer. */ PUT_UINT32_BE( state0, output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( state1, output, 4 ); PUT_UINT32_BE( state2, output, 8 ); PUT_UINT32_BE( state3, output, 12 ); PUT_UINT32_BE( state4, output, 16 ); #endif } __device__ static void sha1_internal_overlap( unsigned int *input, unsigned int *sharedMemory, unsigned int chunkSize, unsigned char *output ) { /* Number of passes (512 bit blocks) we have to do */ int numberOfPasses = chunkSize / 64 + 1; /* Used during the hashing process */ unsigned long temp, A, B, C, D ,E; //unsigned long shared14, shared15; /* Needed to do the big endian stuff */ unsigned char *data = (unsigned char *)sharedMemory; // number of padding bytes. 
int numPadBytes = 0; int numPadInt = 0; //int numPadRemain = 0; /* Will hold the hash value through the intermediate stages of SHA1 algorithm */ unsigned int state0 = 0x67452301; unsigned int state1 = 0xEFCDAB89; unsigned int state2 = 0x98BADCFE; unsigned int state3 = 0x10325476; unsigned int state4 = 0xC3D2E1F0; int x0 = SHARED_MEMORY_INDEX(0); int x1 = SHARED_MEMORY_INDEX(1); int x2 = SHARED_MEMORY_INDEX(2); int x3 = SHARED_MEMORY_INDEX(3); int x4 = SHARED_MEMORY_INDEX(4); int x5 = SHARED_MEMORY_INDEX(5); int x6 = SHARED_MEMORY_INDEX(6); int x7 = SHARED_MEMORY_INDEX(7); int x8 = SHARED_MEMORY_INDEX(8); int x9 = SHARED_MEMORY_INDEX(9); int x10 = SHARED_MEMORY_INDEX(10); int x11 = SHARED_MEMORY_INDEX(11); int x12 = SHARED_MEMORY_INDEX(12); int x13 = SHARED_MEMORY_INDEX(13); int x14 = SHARED_MEMORY_INDEX(14); int x15 = SHARED_MEMORY_INDEX(15); #undef GET_CACHED_INDEX #define GET_CACHED_INDEX(index) (x##index) for( int index = 0 ; index < (numberOfPasses) ; index++ ) { if(index == numberOfPasses -1 ){ numPadBytes = (64-12) - (chunkSize - (numberOfPasses-1)*64); numPadInt = numPadBytes/sizeof(int); /*numPadRemain = numPadBytes-numPadInt*sizeof(int); printf("\nLast loop chunkSize = %d, numberOfPasses= %d and \nnumPadBytes = %d, numPadInt =%d, numPadRemain = %d\n", chunkSize,numberOfPasses,numPadBytes,numPadInt,numPadRemain);*/ int i=0; for(i=0;i<numPadInt;i++){ sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0; } int j=0; for(j=0;j<(16-3-numPadInt);j++){ //printf("j= %d\n",j); sharedMemory[SHARED_MEMORY_INDEX(j)] = input[j + 16 * index]; } /* The last pass will contain the size of the chunk size (according to official SHA1 algorithm). 
*/ sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0x00000080; //printf("the last one at %d\n",13-i); PUT_UINT32_BE( chunkSize >> 29, data, GET_CACHED_INDEX(14) * 4 ); PUT_UINT32_BE( chunkSize << 3, data, GET_CACHED_INDEX(15) * 4 ); } else{ /* Move data to the thread's shared memory space */ //printf("Not last loop\n"); sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index]; sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index]; sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index]; sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index]; sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index]; sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index]; sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index]; sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index]; sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index]; sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index]; sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index]; sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index]; sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index]; sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index]; sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index]; sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index]; } /* int k=0; printf("\nGPU DATA\n"); for(k=0;k<16;k++){ printf("%d\t",sharedMemory[SHARED_MEMORY_INDEX(k)]); } printf("\n\n");*/ /* Get the little endian stuff done. 
*/ GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)], data, GET_CACHED_INDEX(0) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)], data, GET_CACHED_INDEX(1) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)], data, GET_CACHED_INDEX(2) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)], data, GET_CACHED_INDEX(3) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)], data, GET_CACHED_INDEX(4) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)], data, GET_CACHED_INDEX(5) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)], data, GET_CACHED_INDEX(6) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)], data, GET_CACHED_INDEX(7) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)], data, GET_CACHED_INDEX(8) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)], data, GET_CACHED_INDEX(9) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)], data, GET_CACHED_INDEX(10) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)], data, GET_CACHED_INDEX(11) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)], data, GET_CACHED_INDEX(12) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)], data, GET_CACHED_INDEX(13) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)], data, GET_CACHED_INDEX(14) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)], data, GET_CACHED_INDEX(15) * 4 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = macroRFunction(t, sharedMemory) , \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) /* #define R(t) \ ( \ temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) */ #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = state0; B = state1; C = state2; D = state3; E = 
state4; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, 
R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F state0 += A; state1 += B; state2 += C; state3 += D; state4 += E; } /* Got the hash, store it in the output buffer. 
*/ PUT_UINT32_BE( state0, output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( state1, output, 4 ); PUT_UINT32_BE( state2, output, 8 ); PUT_UINT32_BE( state3, output, 12 ); PUT_UINT32_BE( state4, output, 16 ); #endif } #endif /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION SHA1 DESCRIPTION Main sha1 hash function DEPENDENCIES GPU must be initialized RETURN VALUE output: the hash result ===========================================================================*/ __global__ void sha1( unsigned char *input, int chunkSize, int totalThreads, int padSize, unsigned char *scratch ) { // get the current thread index int threadIndex = threadIdx.x + blockDim.x * blockIdx.x; int chunkIndex = threadIndex * chunkSize; int hashIndex = threadIndex * SHA1_HASH_SIZE; if(threadIndex >= totalThreads) return; if ((threadIndex == (totalThreads - 1)) && (padSize > 0)) { for(int i = 0 ; i < padSize ; i++) input[chunkIndex + chunkSize - padSize + i] = 0; } #ifdef FEATURE_SHARED_MEMORY __shared__ unsigned int sharedMemory[4 * 1024 - 32]; unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512); unsigned char *tempInput = input + chunkIndex; unsigned int *inputIndex = (unsigned int *)(tempInput); sha1_internal(inputIndex, sharedMemoryIndex, chunkSize, scratch + hashIndex ); #else sha1_internal(input + chunkIndex, chunkSize, scratch + hashIndex ); #endif /* FEATURE_SHARED_MEMORY */ } __global__ void sha1_overlap( unsigned char *input, int chunkSize, int offset, int totalThreads, int padSize, unsigned char *output ) { int threadIndex = threadIdx.x + blockDim.x * blockIdx.x; int chunkIndex = threadIndex * offset; int hashIndex = threadIndex * SHA1_HASH_SIZE; if(threadIndex >= totalThreads) return; if ((threadIndex == (totalThreads - 1))) { chunkSize-= padSize; 
} #ifdef FEATURE_SHARED_MEMORY __shared__ unsigned int sharedMemory[4 * 1024 - 32]; //NOTE : SAMER : this can exceed the size of the shared memory unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512); unsigned int *inputIndex = (unsigned int *)(input + chunkIndex); sha1_internal_overlap(inputIndex, sharedMemoryIndex, chunkSize, output + hashIndex ); #else sha1_internal(input + chunkIndex, chunkSize, output + hashIndex ); #endif /* FEATURE_SHARED_MEMORY */ }
cd9818ab2581e118083bc99024249661b35dae03.cu
/*========================================================================== SHA1 KERNEL * Copyright (c) 2008, NetSysLab at the University of British Columbia * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. DESCRIPTION CPU version of the storeGPU library. 
==========================================================================*/ /*========================================================================== INCLUDES ==========================================================================*/ #include <string.h> #include <stdio.h> #include "cust.h" /*========================================================================== DATA DECLARATIONS ==========================================================================*/ /*-------------------------------------------------------------------------- TYPE DEFINITIONS --------------------------------------------------------------------------*/ typedef struct { unsigned long total[2]; /*!< number of bytes processed */ unsigned long state[5]; /*!< intermediate digest state */ unsigned char buffer[64]; /*!< data block being processed */ } sha1_context; /*-------------------------------------------------------------------------- FUNCTION PROTOTYPES --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- CONSTANTS --------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- GLOBAL VARIABLES --------------------------------------------------------------------------*/ __device__ static const unsigned char sha1_padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /*-------------------------------------------------------------------------- MACROS --------------------------------------------------------------------------*/ #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE 1 #endif /* * 32-bit integer manipulation macros (big endian) */ #ifndef GET_UINT32_BE #define GET_UINT32_BE(n,b,i) \ { \ (n) = ( (unsigned long) (b)[(i) ] 
<< 24 ) \ | ( (unsigned long) (b)[(i) + 1] << 16 ) \ | ( (unsigned long) (b)[(i) + 2] << 8 ) \ | ( (unsigned long) (b)[(i) + 3] ); \ } #endif #ifndef PUT_UINT32_BE #define PUT_UINT32_BE(n,b,i) \ { \ (b)[(i) ] = (unsigned char) ( (n) >> 24 ); \ (b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \ (b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \ (b)[(i) + 3] = (unsigned char) ( (n) ); \ } #endif #ifdef FEATURE_SHARED_MEMORY // current thread stride. #undef SHARED_MEMORY_INDEX #define SHARED_MEMORY_INDEX(index) (32 * (index) + (threadIdx.x & 0x1F)) #endif /* FEATURE_SHARED_MEMORY */ /*-------------------------------------------------------------------------- LOCAL FUNCTIONS --------------------------------------------------------------------------*/ #ifndef FEATURE_SHARED_MEMORY /* * SHA-1 context setup */ /*=========================================================================== FUNCTION SHA1_GPU_STARTS DESCRIPTION SHA-1 context setup DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_starts( sha1_context *ctx ) { ctx->total[0] = 0; ctx->total[1] = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xEFCDAB89; ctx->state[2] = 0x98BADCFE; ctx->state[3] = 0x10325476; ctx->state[4] = 0xC3D2E1F0; } /*=========================================================================== FUNCTION SHA1_GPU_PROCESS DESCRIPTION SHA1 process buffer DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_process( sha1_context *ctx, unsigned char data[64] ) { unsigned long temp, W[16], A, B, C, D, E; GET_UINT32_BE( W[ 0], data, 0 ); GET_UINT32_BE( W[ 1], data, 4 ); GET_UINT32_BE( W[ 2], data, 8 ); GET_UINT32_BE( W[ 3], data, 12 ); GET_UINT32_BE( W[ 4], data, 16 ); GET_UINT32_BE( W[ 5], data, 20 ); GET_UINT32_BE( W[ 6], data, 24 ); GET_UINT32_BE( W[ 7], data, 28 ); GET_UINT32_BE( W[ 8], data, 32 ); GET_UINT32_BE( W[ 9], data, 36 ); 
GET_UINT32_BE( W[10], data, 40 ); GET_UINT32_BE( W[11], data, 44 ); GET_UINT32_BE( W[12], data, 48 ); GET_UINT32_BE( W[13], data, 52 ); GET_UINT32_BE( W[14], data, 56 ); GET_UINT32_BE( W[15], data, 60 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ \ W[(t - 14) & 0x0F] ^ W[ t & 0x0F], \ ( W[t & 0x0F] = S(temp,1) ) \ ) #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = ctx->state[0]; B = ctx->state[1]; C = ctx->state[2]; D = ctx->state[3]; E = ctx->state[4]; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, W[0] ); P( E, A, B, C, D, W[1] ); P( D, E, A, B, C, W[2] ); P( C, D, E, A, B, W[3] ); P( B, C, D, E, A, W[4] ); P( A, B, C, D, E, W[5] ); P( E, A, B, C, D, W[6] ); P( D, E, A, B, C, W[7] ); P( C, D, E, A, B, W[8] ); P( B, C, D, E, A, W[9] ); P( A, B, C, D, E, W[10] ); P( E, A, B, C, D, W[11] ); P( D, E, A, B, C, W[12] ); P( C, D, E, A, B, W[13] ); P( B, C, D, E, A, W[14] ); P( A, B, C, D, E, W[15] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, 
E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F ctx->state[0] += A; ctx->state[1] += B; ctx->state[2] += C; ctx->state[3] += D; ctx->state[4] += E; } /*=========================================================================== FUNCTION SHA1_CPU_UPDATE DESCRIPTION SHA1 update buffer DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_update( sha1_context *ctx, unsigned char *input, int ilen ) { int fill; unsigned long left; if( ilen <= 0 ) return; left = ctx->total[0] & 0x3F; fill = 64 - left; ctx->total[0] += ilen; ctx->total[0] &= 0xFFFFFFFF; if ( ctx->total[0] < (unsigned long) ilen ) ctx->total[1]++; if ( left && ilen >= fill ) { /*memcpy( (void *) (ctx->buffer + left), (void *) input, fill );*/ for (int i = 0; i < fill; i++) { ctx->buffer[i+left] = input[i]; } sha1_process( ctx, ctx->buffer ); input += fill; ilen -= fill; left = 0; } while ( ilen >= 64 ) { sha1_process( ctx, input ); input += 64; 
ilen -= 64; } if ( ilen > 0 ) { /*memcpy( (void *) (ctx->buffer + left), (void *) input, ilen );*/ for (int i = 0; i < ilen; i++) { ctx->buffer[i+left] = input[i]; } } } /*=========================================================================== FUNCTION SHA1_CPU_FINISH DESCRIPTION SHA1 final digest DEPENDENCIES None RETURN VALUE None ===========================================================================*/ __device__ void sha1_finish( sha1_context *ctx, unsigned char *output ) { unsigned long last, padn; unsigned long high, low; unsigned char msglen[8]; high = ( ctx->total[0] >> 29 ) | ( ctx->total[1] << 3 ); low = ( ctx->total[0] << 3 ); PUT_UINT32_BE( high, msglen, 0 ); PUT_UINT32_BE( low, msglen, 4 ); last = ctx->total[0] & 0x3F; padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last ); sha1_update( ctx, (unsigned char *) sha1_padding, padn ); sha1_update( ctx, msglen, 8 ); PUT_UINT32_BE( ctx->state[0], output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( ctx->state[1], output, 4 ); PUT_UINT32_BE( ctx->state[2], output, 8 ); PUT_UINT32_BE( ctx->state[3], output, 12 ); PUT_UINT32_BE( ctx->state[4], output, 16 ); #endif } /*=========================================================================== FUNCTION SHA1_INTERNAL DESCRIPTION Does the real sha1 algorithm DEPENDENCIES None RETURN VALUE output is the hash result ===========================================================================*/ __device__ void sha1_internal( unsigned char *input, int ilen, unsigned char *output ) { sha1_context ctx; sha1_starts( &ctx ); sha1_update( &ctx, input, ilen ); sha1_finish( &ctx, output ); memset( &ctx, 0, sizeof( sha1_context ) ); } #endif #ifdef FEATURE_SHARED_MEMORY /*=========================================================================== FUNCTION SHA1_INTERNAL DESCRIPTION Does the real sha1 algorithm. 
DEPENDENCIES None RETURN VALUE output is the hash result ===========================================================================*/ __device__ unsigned long macroRFunction(int t, unsigned int *sharedMemory) { return sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)]; } __device__ static void sha1_internal( unsigned int *input, unsigned int *sharedMemory, unsigned int chunkSize, unsigned char *output ) { /* Number of passes (512 bit blocks) we have to do */ int numberOfPasses = chunkSize / 64 + 1; /* Used during the hashing process */ unsigned long temp, A, B, C, D ,E; //unsigned long shared14, shared15; /* Needed to do the little endian stuff */ unsigned char *data = (unsigned char *)sharedMemory; /* Will hold the hash value through the intermediate stages of SHA1 algorithm */ unsigned int state0 = 0x67452301; unsigned int state1 = 0xEFCDAB89; unsigned int state2 = 0x98BADCFE; unsigned int state3 = 0x10325476; unsigned int state4 = 0xC3D2E1F0; /* int x0 = SHARED_MEMORY_INDEX(0); int x1 = SHARED_MEMORY_INDEX(1); int x2 = SHARED_MEMORY_INDEX(2); int x3 = SHARED_MEMORY_INDEX(3); int x4 = SHARED_MEMORY_INDEX(4); int x5 = SHARED_MEMORY_INDEX(5); int x6 = SHARED_MEMORY_INDEX(6); int x7 = SHARED_MEMORY_INDEX(7); int x8 = SHARED_MEMORY_INDEX(8); int x9 = SHARED_MEMORY_INDEX(9); int x10 = SHARED_MEMORY_INDEX(10); int x11 = SHARED_MEMORY_INDEX(11); int x12 = SHARED_MEMORY_INDEX(12); int x13 = SHARED_MEMORY_INDEX(13); int x14 = SHARED_MEMORY_INDEX(14); int x15 = SHARED_MEMORY_INDEX(15); */ #undef GET_CACHED_INDEX #define GET_CACHED_INDEX(index) SHARED_MEMORY_INDEX(index)//(x##index) for( int index = 0 ; index < (numberOfPasses) ; index++ ) { /* Move data to the thread's shared memory space */ sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index]; sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index]; 
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index]; sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index]; sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index]; sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index]; sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index]; sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index]; sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index]; sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index]; sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index]; sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index]; sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index]; /* Testing the code with and without this if statement shows that it has no effect on performance. */ if(index == numberOfPasses -1 ) { /* The last pass will contain the size of the chunk size (according to official SHA1 algorithm). */ sharedMemory[GET_CACHED_INDEX(13)] = 0x00000080; PUT_UINT32_BE( chunkSize >> 29, data, GET_CACHED_INDEX(14) * 4 ); PUT_UINT32_BE( chunkSize << 3, data, GET_CACHED_INDEX(15) * 4 ); } else { sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index]; sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index]; sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index]; } /* Get the little endian stuff done. 
*/ GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)], data, GET_CACHED_INDEX(0) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)], data, GET_CACHED_INDEX(1) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)], data, GET_CACHED_INDEX(2) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)], data, GET_CACHED_INDEX(3) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)], data, GET_CACHED_INDEX(4) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)], data, GET_CACHED_INDEX(5) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)], data, GET_CACHED_INDEX(6) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)], data, GET_CACHED_INDEX(7) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)], data, GET_CACHED_INDEX(8) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)], data, GET_CACHED_INDEX(9) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)], data, GET_CACHED_INDEX(10) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)], data, GET_CACHED_INDEX(11) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)], data, GET_CACHED_INDEX(12) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)], data, GET_CACHED_INDEX(13) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)], data, GET_CACHED_INDEX(14) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)], data, GET_CACHED_INDEX(15) * 4 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = macroRFunction(t, sharedMemory) , \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) /* #define R(t) \ ( \ temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) */ #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = state0; B = state1; C = state2; D = state3; E = 
state4; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, 
R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F state0 += A; state1 += B; state2 += C; state3 += D; state4 += E; } /* Got the hash, store it in the output buffer. */ PUT_UINT32_BE( state0, output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( state1, output, 4 ); PUT_UINT32_BE( state2, output, 8 ); PUT_UINT32_BE( state3, output, 12 ); PUT_UINT32_BE( state4, output, 16 ); #endif } __device__ static void sha1_internal_overlap( unsigned int *input, unsigned int *sharedMemory, unsigned int chunkSize, unsigned char *output ) { /* Number of passes (512 bit blocks) we have to do */ int numberOfPasses = chunkSize / 64 + 1; /* Used during the hashing process */ unsigned long temp, A, B, C, D ,E; //unsigned long shared14, shared15; /* Needed to do the big endian stuff */ unsigned char *data = (unsigned char *)sharedMemory; // number of padding bytes. 
int numPadBytes = 0; int numPadInt = 0; //int numPadRemain = 0; /* Will hold the hash value through the intermediate stages of SHA1 algorithm */ unsigned int state0 = 0x67452301; unsigned int state1 = 0xEFCDAB89; unsigned int state2 = 0x98BADCFE; unsigned int state3 = 0x10325476; unsigned int state4 = 0xC3D2E1F0; int x0 = SHARED_MEMORY_INDEX(0); int x1 = SHARED_MEMORY_INDEX(1); int x2 = SHARED_MEMORY_INDEX(2); int x3 = SHARED_MEMORY_INDEX(3); int x4 = SHARED_MEMORY_INDEX(4); int x5 = SHARED_MEMORY_INDEX(5); int x6 = SHARED_MEMORY_INDEX(6); int x7 = SHARED_MEMORY_INDEX(7); int x8 = SHARED_MEMORY_INDEX(8); int x9 = SHARED_MEMORY_INDEX(9); int x10 = SHARED_MEMORY_INDEX(10); int x11 = SHARED_MEMORY_INDEX(11); int x12 = SHARED_MEMORY_INDEX(12); int x13 = SHARED_MEMORY_INDEX(13); int x14 = SHARED_MEMORY_INDEX(14); int x15 = SHARED_MEMORY_INDEX(15); #undef GET_CACHED_INDEX #define GET_CACHED_INDEX(index) (x##index) for( int index = 0 ; index < (numberOfPasses) ; index++ ) { if(index == numberOfPasses -1 ){ numPadBytes = (64-12) - (chunkSize - (numberOfPasses-1)*64); numPadInt = numPadBytes/sizeof(int); /*numPadRemain = numPadBytes-numPadInt*sizeof(int); printf("\nLast loop chunkSize = %d, numberOfPasses= %d and \nnumPadBytes = %d, numPadInt =%d, numPadRemain = %d\n", chunkSize,numberOfPasses,numPadBytes,numPadInt,numPadRemain);*/ int i=0; for(i=0;i<numPadInt;i++){ sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0; } int j=0; for(j=0;j<(16-3-numPadInt);j++){ //printf("j= %d\n",j); sharedMemory[SHARED_MEMORY_INDEX(j)] = input[j + 16 * index]; } /* The last pass will contain the size of the chunk size (according to official SHA1 algorithm). 
*/ sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0x00000080; //printf("the last one at %d\n",13-i); PUT_UINT32_BE( chunkSize >> 29, data, GET_CACHED_INDEX(14) * 4 ); PUT_UINT32_BE( chunkSize << 3, data, GET_CACHED_INDEX(15) * 4 ); } else{ /* Move data to the thread's shared memory space */ //printf("Not last loop\n"); sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index]; sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index]; sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index]; sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index]; sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index]; sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index]; sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index]; sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index]; sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index]; sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index]; sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index]; sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index]; sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index]; sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index]; sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index]; sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index]; } /* int k=0; printf("\nGPU DATA\n"); for(k=0;k<16;k++){ printf("%d\t",sharedMemory[SHARED_MEMORY_INDEX(k)]); } printf("\n\n");*/ /* Get the little endian stuff done. 
*/ GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)], data, GET_CACHED_INDEX(0) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)], data, GET_CACHED_INDEX(1) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)], data, GET_CACHED_INDEX(2) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)], data, GET_CACHED_INDEX(3) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)], data, GET_CACHED_INDEX(4) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)], data, GET_CACHED_INDEX(5) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)], data, GET_CACHED_INDEX(6) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)], data, GET_CACHED_INDEX(7) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)], data, GET_CACHED_INDEX(8) * 4 ); GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)], data, GET_CACHED_INDEX(9) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)], data, GET_CACHED_INDEX(10) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)], data, GET_CACHED_INDEX(11) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)], data, GET_CACHED_INDEX(12) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)], data, GET_CACHED_INDEX(13) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)], data, GET_CACHED_INDEX(14) * 4 ); GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)], data, GET_CACHED_INDEX(15) * 4 ); #undef S #define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n))) #undef R #define R(t) \ ( \ temp = macroRFunction(t, sharedMemory) , \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) /* #define R(t) \ ( \ temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \ sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \ ( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \ ) */ #undef P #define P(a,b,c,d,e,x) \ { \ e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \ } A = state0; B = state1; C = state2; D = state3; E = 
state4; #define F(x,y,z) (z ^ (x & (y ^ z))) #define K 0x5A827999 P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] ); P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] ); P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] ); P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] ); P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] ); P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] ); P( E, A, B, C, D, R(16) ); P( D, E, A, B, C, R(17) ); P( C, D, E, A, B, R(18) ); P( B, C, D, E, A, R(19) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0x6ED9EBA1 P( A, B, C, D, E, R(20) ); P( E, A, B, C, D, R(21) ); P( D, E, A, B, C, R(22) ); P( C, D, E, A, B, R(23) ); P( B, C, D, E, A, R(24) ); P( A, B, C, D, E, R(25) ); P( E, A, B, C, D, R(26) ); P( D, E, A, B, C, R(27) ); P( C, D, E, A, B, R(28) ); P( B, C, D, E, A, R(29) ); P( A, B, C, D, E, R(30) ); P( E, A, B, C, D, R(31) ); P( D, E, A, B, C, R(32) ); P( C, D, E, A, B, R(33) ); P( B, C, D, E, A, R(34) ); P( A, B, C, D, E, R(35) ); P( E, A, B, C, D, R(36) ); P( D, E, A, B, C, R(37) ); P( C, D, E, A, B, R(38) ); P( B, C, D, E, A, R(39) ); #undef K #undef F #define F(x,y,z) ((x & y) | (z & (x | y))) #define K 0x8F1BBCDC P( A, B, C, D, E, R(40) ); P( E, A, B, C, D, R(41) ); P( D, E, A, B, C, R(42) ); P( C, D, E, A, B, R(43) ); P( B, C, D, E, A, R(44) ); P( A, B, C, D, E, R(45) ); P( E, A, B, C, D, R(46) ); P( D, E, A, B, C, R(47) ); P( C, D, E, A, B, 
R(48) ); P( B, C, D, E, A, R(49) ); P( A, B, C, D, E, R(50) ); P( E, A, B, C, D, R(51) ); P( D, E, A, B, C, R(52) ); P( C, D, E, A, B, R(53) ); P( B, C, D, E, A, R(54) ); P( A, B, C, D, E, R(55) ); P( E, A, B, C, D, R(56) ); P( D, E, A, B, C, R(57) ); P( C, D, E, A, B, R(58) ); P( B, C, D, E, A, R(59) ); #undef K #undef F #define F(x,y,z) (x ^ y ^ z) #define K 0xCA62C1D6 P( A, B, C, D, E, R(60) ); P( E, A, B, C, D, R(61) ); P( D, E, A, B, C, R(62) ); P( C, D, E, A, B, R(63) ); P( B, C, D, E, A, R(64) ); P( A, B, C, D, E, R(65) ); P( E, A, B, C, D, R(66) ); P( D, E, A, B, C, R(67) ); P( C, D, E, A, B, R(68) ); P( B, C, D, E, A, R(69) ); P( A, B, C, D, E, R(70) ); P( E, A, B, C, D, R(71) ); P( D, E, A, B, C, R(72) ); P( C, D, E, A, B, R(73) ); P( B, C, D, E, A, R(74) ); P( A, B, C, D, E, R(75) ); P( E, A, B, C, D, R(76) ); P( D, E, A, B, C, R(77) ); P( C, D, E, A, B, R(78) ); P( B, C, D, E, A, R(79) ); #undef K #undef F state0 += A; state1 += B; state2 += C; state3 += D; state4 += E; } /* Got the hash, store it in the output buffer. 
*/ PUT_UINT32_BE( state0, output, 0 ); #ifndef FEATURE_REDUCED_HASH_SIZE PUT_UINT32_BE( state1, output, 4 ); PUT_UINT32_BE( state2, output, 8 ); PUT_UINT32_BE( state3, output, 12 ); PUT_UINT32_BE( state4, output, 16 ); #endif } #endif /*-------------------------------------------------------------------------- GLOBAL FUNCTIONS --------------------------------------------------------------------------*/ /*=========================================================================== FUNCTION SHA1 DESCRIPTION Main sha1 hash function DEPENDENCIES GPU must be initialized RETURN VALUE output: the hash result ===========================================================================*/ __global__ void sha1( unsigned char *input, int chunkSize, int totalThreads, int padSize, unsigned char *scratch ) { // get the current thread index int threadIndex = threadIdx.x + blockDim.x * blockIdx.x; int chunkIndex = threadIndex * chunkSize; int hashIndex = threadIndex * SHA1_HASH_SIZE; if(threadIndex >= totalThreads) return; if ((threadIndex == (totalThreads - 1)) && (padSize > 0)) { for(int i = 0 ; i < padSize ; i++) input[chunkIndex + chunkSize - padSize + i] = 0; } #ifdef FEATURE_SHARED_MEMORY __shared__ unsigned int sharedMemory[4 * 1024 - 32]; unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512); unsigned char *tempInput = input + chunkIndex; unsigned int *inputIndex = (unsigned int *)(tempInput); sha1_internal(inputIndex, sharedMemoryIndex, chunkSize, scratch + hashIndex ); #else sha1_internal(input + chunkIndex, chunkSize, scratch + hashIndex ); #endif /* FEATURE_SHARED_MEMORY */ } __global__ void sha1_overlap( unsigned char *input, int chunkSize, int offset, int totalThreads, int padSize, unsigned char *output ) { int threadIndex = threadIdx.x + blockDim.x * blockIdx.x; int chunkIndex = threadIndex * offset; int hashIndex = threadIndex * SHA1_HASH_SIZE; if(threadIndex >= totalThreads) return; if ((threadIndex == (totalThreads - 1))) { chunkSize-= padSize; 
} #ifdef FEATURE_SHARED_MEMORY __shared__ unsigned int sharedMemory[4 * 1024 - 32]; //NOTE : SAMER : this can exceed the size of the shared memory unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512); unsigned int *inputIndex = (unsigned int *)(input + chunkIndex); sha1_internal_overlap(inputIndex, sharedMemoryIndex, chunkSize, output + hashIndex ); #else sha1_internal(input + chunkIndex, chunkSize, output + hashIndex ); #endif /* FEATURE_SHARED_MEMORY */ }
a696c4c797d5c216c7a001f9c42057555e9eacc1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void expon(float* env, int nhalf) { int i = threadIdx.x + blockDim.x*blockIdx.x; if (i < nhalf) { env[i] = exp(env[i]/nhalf); // exponentiate } }
a696c4c797d5c216c7a001f9c42057555e9eacc1.cu
#include "includes.h" __global__ void expon(float* env, int nhalf) { int i = threadIdx.x + blockDim.x*blockIdx.x; if (i < nhalf) { env[i] = exp(env[i]/nhalf); // exponentiate } }
0039a1d8b84b2ecc06e16b7a2b8c1aede35c385d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/math/normalize/normalize.h" #include "dali/core/math_util.h" #include "dali/core/tensor_layout.h" #include "dali/kernels/normalize/normalize_gpu.h" #include "dali/kernels/reduce/reduce_gpu.h" #include "dali/kernels/common/copy.h" namespace dali { using namespace kernels; // NOLINT template <> class Normalize<GPUBackend> : public NormalizeBase<GPUBackend> { public: explicit Normalize(const OpSpec &spec) : NormalizeBase<GPUBackend>(spec) {} private: friend class NormalizeBase<GPUBackend>; template <typename OutputType, typename InputType> void SetupTyped(const DeviceWorkspace &ws); template <typename OutputType, typename InputType> void RunTyped(DeviceWorkspace &ws); void AllocTempStorage(); void FoldMeans(); void FoldStdDev(); template <typename ParamType, typename InputType> MeanGPU<ParamType, InputType> &GetMeanKernel() { return mean_kernel_.create_or_get<MeanGPU<ParamType, InputType>>(); } template <typename ParamType, typename InputType> InvStdDevGPU<ParamType, InputType> &GetInvStdDevKernel() { return stddev_kernel_.create_or_get<InvStdDevGPU<ParamType, InputType>>(); } template <typename OutputType, typename InputType> NormalizeGPU<OutputType, InputType> &GetNormalizeKernel() { return 
normalize_kernel_.create_or_get<NormalizeGPU<OutputType, InputType>>(); } TensorListView<StorageGPU, float> BroadcastMean(KernelContext &ctx, float value) const; AnyKernelInstance mean_kernel_, stddev_kernel_, normalize_kernel_; ScratchpadAllocator alloc_; }; DALI_REGISTER_OPERATOR(Normalize, Normalize<GPUBackend>, GPU); namespace { template <typename ToUpdate, typename Other> inline void MaxInPlace(ToUpdate &inout, const Other &other) { auto b1 = dali::begin(inout); auto b2 = dali::begin(other); auto e1 = dali::end(inout); auto e2 = dali::end(other); for (; b1 != e1 && b2 != e2; b1++, b2++) { if (*b1 < *b2) { *b1 = *b2; } } } using scratch_sizes_t = dali::kernels::scratch_sizes_t; class ScratchpadSnapshot { public: explicit ScratchpadSnapshot(PreallocatedScratchpad &scratch) : scratch_(scratch) { for (size_t i = 0; i < ss_.size(); i++) ss_[i] = scratch_.allocs[i].used(); } ~ScratchpadSnapshot() { restore(); } private: void restore() { scratch_.Clear(); // this doesn't clear the memory - just resets the usage counter to 0 for (size_t i = 0; i < ss_.size(); i++) scratch_.allocs[i].alloc(ss_[i]); } scratch_sizes_t ss_; PreallocatedScratchpad &scratch_; }; template <int ndim> int64_t MaxSampleSize(const TensorListShape<ndim> &tls) { int64_t max_sample_size = 0; for (int i = 0; i < tls.num_samples(); i++) { int64_t v = volume(tls.tensor_shape_span(i)); if (v > max_sample_size) max_sample_size = v; } return max_sample_size; } template <typename T> __global__ void Fill(T *data, size_t count, T value) { auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (i < count) data[i] = value; } } // namespace TensorListView<StorageGPU, float> Normalize<GPUBackend>::BroadcastMean(KernelContext &ctx, float value) const { TensorListView<StorageGPU, float> mean_gpu; mean_gpu.shape = param_shape_; mean_gpu.data.resize(param_shape_.num_samples()); // allocate enough memory to hold the largest sample... 
int64_t max_sample_size = MaxSampleSize(param_shape_); float *gpu_mean_data = ctx.scratchpad->AllocateGPU<float>(max_sample_size); int grid = div_ceil(max_sample_size, 1024); int block = std::min<int64_t>(max_sample_size, 1024); // ...fill it with given value... hipLaunchKernelGGL(( Fill), dim3(grid), dim3(block), 0, ctx.gpu.stream, gpu_mean_data, max_sample_size, value); // ...and reuse the memory for all samples for (auto &ptr : mean_gpu.data) ptr = gpu_mean_data; return mean_gpu; } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::SetupTyped(const DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); int nsamples = input.ntensor(); KernelContext ctx; ctx.gpu.stream = ws.stream(); ScratchpadEstimator se; int64_t param_volume = param_shape_.num_elements(); // estimate memory requirements for intermediate buffers if (!has_scalar_mean_) { se.add<mm::memory_kind::device, float>(param_volume); } else { if (ShouldCalcStdDev()) { // StdDev kernel requires the mean to have the same shape as the output. // We can save memory by broadcasting the mean only to the size of the largest sample // and repeat the pointer for all samples. 
se.add<mm::memory_kind::device, float>(MaxSampleSize(param_shape_)); } } if (!has_scalar_stddev_) { se.add<mm::memory_kind::device, float>(param_volume); } // setup and get memory requirements from kernels auto &norm = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, it's already inverse bool scale_is_stddev = !ShouldCalcStdDev(); auto req = norm.Setup(ctx, data_shape_, make_span(axes_), has_scalar_mean_, has_scalar_stddev_, scale_is_stddev); if (ShouldCalcMean()) { auto &mean = GetMeanKernel<float, InputType>(); auto mean_req = mean.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(mean_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, mean_req.scratch_sizes); } if (ShouldCalcStdDev()) { auto &stddev = GetInvStdDevKernel<float, InputType>(); auto stddev_req = stddev.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(stddev_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, stddev_req.scratch_sizes); } se.add<mm::memory_kind::host, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::host)], 64); se.add<mm::memory_kind::pinned, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::pinned)], 64); se.add<mm::memory_kind::device, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::device)], 64); se.add<mm::memory_kind::managed, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::managed)], 64); alloc_.Reserve(se.sizes); } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::RunTyped(DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); TensorListView<StorageGPU, const InputType> in_view = view<const InputType>(input); auto &output = ws.OutputRef<GPUBackend>(0); TensorListView<StorageGPU, OutputType> out_view = view<OutputType>(output); output.SetLayout(input.GetLayout()); int nsamples = input.ntensor(); hipStream_t stream = ws.stream(); PreallocatedScratchpad scratch = 
alloc_.GetScratchpad(); KernelContext ctx; ctx.scratchpad = &scratch; ctx.gpu.stream = stream; // Prepare mean and stddev float scalar_mean = has_scalar_mean_ ? spec_.GetArgument<float>("mean") : 0; float scalar_stddev = has_scalar_stddev_ ? spec_.GetArgument<float>("stddev") : 1; OutListGPU<float> mean_gpu, stddev_gpu; if (!has_scalar_mean_) { mean_gpu = scratch.AllocTensorList<mm::memory_kind::device, float>(param_shape_); } else if (ShouldCalcStdDev()) { mean_gpu = BroadcastMean(ctx, scalar_mean); } if (!has_scalar_stddev_) { stddev_gpu = scratch.AllocTensorList<mm::memory_kind::device, float>(param_shape_); } if (ShouldCalcMean()) { // We can't just Clear() the scratchpad to reuse it, because temporary buffers are also // stored there - so let's make a snapshot of current allocation state and restore it // after the kernel Run is done. ScratchpadSnapshot snap(scratch); auto &mean_kernel = GetMeanKernel<float, InputType>(); mean_kernel.Run(ctx, mean_gpu, in_view); } else if (has_tensor_mean_) { kernels::copy(mean_gpu, mean_input_, stream); } if (ShouldCalcStdDev()) { ScratchpadSnapshot snap(scratch); auto &stddev_kernel = GetInvStdDevKernel<float, InputType>(); stddev_kernel.Run(ctx, stddev_gpu, in_view, mean_gpu, degrees_of_freedom_, epsilon_); } else if (has_tensor_stddev_) { kernels::copy(stddev_gpu, stddev_input_, stream); } // finally, run the normalize kernel { ScratchpadSnapshot snap(scratch); auto &norm_kernel = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, epsilon has already been included float epsilon = ShouldCalcStdDev() ? 
0 : epsilon_; if (has_scalar_mean_) { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, stddev_gpu, scale_, shift_, epsilon); } } else { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, stddev_gpu, scale_, shift_, epsilon); } } } } } // namespace dali
0039a1d8b84b2ecc06e16b7a2b8c1aede35c385d.cu
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/math/normalize/normalize.h" #include "dali/core/math_util.h" #include "dali/core/tensor_layout.h" #include "dali/kernels/normalize/normalize_gpu.h" #include "dali/kernels/reduce/reduce_gpu.h" #include "dali/kernels/common/copy.h" namespace dali { using namespace kernels; // NOLINT template <> class Normalize<GPUBackend> : public NormalizeBase<GPUBackend> { public: explicit Normalize(const OpSpec &spec) : NormalizeBase<GPUBackend>(spec) {} private: friend class NormalizeBase<GPUBackend>; template <typename OutputType, typename InputType> void SetupTyped(const DeviceWorkspace &ws); template <typename OutputType, typename InputType> void RunTyped(DeviceWorkspace &ws); void AllocTempStorage(); void FoldMeans(); void FoldStdDev(); template <typename ParamType, typename InputType> MeanGPU<ParamType, InputType> &GetMeanKernel() { return mean_kernel_.create_or_get<MeanGPU<ParamType, InputType>>(); } template <typename ParamType, typename InputType> InvStdDevGPU<ParamType, InputType> &GetInvStdDevKernel() { return stddev_kernel_.create_or_get<InvStdDevGPU<ParamType, InputType>>(); } template <typename OutputType, typename InputType> NormalizeGPU<OutputType, InputType> &GetNormalizeKernel() { return normalize_kernel_.create_or_get<NormalizeGPU<OutputType, InputType>>(); } TensorListView<StorageGPU, float> 
BroadcastMean(KernelContext &ctx, float value) const; AnyKernelInstance mean_kernel_, stddev_kernel_, normalize_kernel_; ScratchpadAllocator alloc_; }; DALI_REGISTER_OPERATOR(Normalize, Normalize<GPUBackend>, GPU); namespace { template <typename ToUpdate, typename Other> inline void MaxInPlace(ToUpdate &inout, const Other &other) { auto b1 = dali::begin(inout); auto b2 = dali::begin(other); auto e1 = dali::end(inout); auto e2 = dali::end(other); for (; b1 != e1 && b2 != e2; b1++, b2++) { if (*b1 < *b2) { *b1 = *b2; } } } using scratch_sizes_t = dali::kernels::scratch_sizes_t; class ScratchpadSnapshot { public: explicit ScratchpadSnapshot(PreallocatedScratchpad &scratch) : scratch_(scratch) { for (size_t i = 0; i < ss_.size(); i++) ss_[i] = scratch_.allocs[i].used(); } ~ScratchpadSnapshot() { restore(); } private: void restore() { scratch_.Clear(); // this doesn't clear the memory - just resets the usage counter to 0 for (size_t i = 0; i < ss_.size(); i++) scratch_.allocs[i].alloc(ss_[i]); } scratch_sizes_t ss_; PreallocatedScratchpad &scratch_; }; template <int ndim> int64_t MaxSampleSize(const TensorListShape<ndim> &tls) { int64_t max_sample_size = 0; for (int i = 0; i < tls.num_samples(); i++) { int64_t v = volume(tls.tensor_shape_span(i)); if (v > max_sample_size) max_sample_size = v; } return max_sample_size; } template <typename T> __global__ void Fill(T *data, size_t count, T value) { auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (i < count) data[i] = value; } } // namespace TensorListView<StorageGPU, float> Normalize<GPUBackend>::BroadcastMean(KernelContext &ctx, float value) const { TensorListView<StorageGPU, float> mean_gpu; mean_gpu.shape = param_shape_; mean_gpu.data.resize(param_shape_.num_samples()); // allocate enough memory to hold the largest sample... 
int64_t max_sample_size = MaxSampleSize(param_shape_); float *gpu_mean_data = ctx.scratchpad->AllocateGPU<float>(max_sample_size); int grid = div_ceil(max_sample_size, 1024); int block = std::min<int64_t>(max_sample_size, 1024); // ...fill it with given value... Fill<<<grid, block, 0, ctx.gpu.stream>>>(gpu_mean_data, max_sample_size, value); // ...and reuse the memory for all samples for (auto &ptr : mean_gpu.data) ptr = gpu_mean_data; return mean_gpu; } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::SetupTyped(const DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); int nsamples = input.ntensor(); KernelContext ctx; ctx.gpu.stream = ws.stream(); ScratchpadEstimator se; int64_t param_volume = param_shape_.num_elements(); // estimate memory requirements for intermediate buffers if (!has_scalar_mean_) { se.add<mm::memory_kind::device, float>(param_volume); } else { if (ShouldCalcStdDev()) { // StdDev kernel requires the mean to have the same shape as the output. // We can save memory by broadcasting the mean only to the size of the largest sample // and repeat the pointer for all samples. 
se.add<mm::memory_kind::device, float>(MaxSampleSize(param_shape_)); } } if (!has_scalar_stddev_) { se.add<mm::memory_kind::device, float>(param_volume); } // setup and get memory requirements from kernels auto &norm = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, it's already inverse bool scale_is_stddev = !ShouldCalcStdDev(); auto req = norm.Setup(ctx, data_shape_, make_span(axes_), has_scalar_mean_, has_scalar_stddev_, scale_is_stddev); if (ShouldCalcMean()) { auto &mean = GetMeanKernel<float, InputType>(); auto mean_req = mean.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(mean_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, mean_req.scratch_sizes); } if (ShouldCalcStdDev()) { auto &stddev = GetInvStdDevKernel<float, InputType>(); auto stddev_req = stddev.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(stddev_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, stddev_req.scratch_sizes); } se.add<mm::memory_kind::host, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::host)], 64); se.add<mm::memory_kind::pinned, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::pinned)], 64); se.add<mm::memory_kind::device, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::device)], 64); se.add<mm::memory_kind::managed, char>( req.scratch_sizes[static_cast<int>(mm::memory_kind_id::managed)], 64); alloc_.Reserve(se.sizes); } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::RunTyped(DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); TensorListView<StorageGPU, const InputType> in_view = view<const InputType>(input); auto &output = ws.OutputRef<GPUBackend>(0); TensorListView<StorageGPU, OutputType> out_view = view<OutputType>(output); output.SetLayout(input.GetLayout()); int nsamples = input.ntensor(); cudaStream_t stream = ws.stream(); PreallocatedScratchpad scratch = 
alloc_.GetScratchpad(); KernelContext ctx; ctx.scratchpad = &scratch; ctx.gpu.stream = stream; // Prepare mean and stddev float scalar_mean = has_scalar_mean_ ? spec_.GetArgument<float>("mean") : 0; float scalar_stddev = has_scalar_stddev_ ? spec_.GetArgument<float>("stddev") : 1; OutListGPU<float> mean_gpu, stddev_gpu; if (!has_scalar_mean_) { mean_gpu = scratch.AllocTensorList<mm::memory_kind::device, float>(param_shape_); } else if (ShouldCalcStdDev()) { mean_gpu = BroadcastMean(ctx, scalar_mean); } if (!has_scalar_stddev_) { stddev_gpu = scratch.AllocTensorList<mm::memory_kind::device, float>(param_shape_); } if (ShouldCalcMean()) { // We can't just Clear() the scratchpad to reuse it, because temporary buffers are also // stored there - so let's make a snapshot of current allocation state and restore it // after the kernel Run is done. ScratchpadSnapshot snap(scratch); auto &mean_kernel = GetMeanKernel<float, InputType>(); mean_kernel.Run(ctx, mean_gpu, in_view); } else if (has_tensor_mean_) { kernels::copy(mean_gpu, mean_input_, stream); } if (ShouldCalcStdDev()) { ScratchpadSnapshot snap(scratch); auto &stddev_kernel = GetInvStdDevKernel<float, InputType>(); stddev_kernel.Run(ctx, stddev_gpu, in_view, mean_gpu, degrees_of_freedom_, epsilon_); } else if (has_tensor_stddev_) { kernels::copy(stddev_gpu, stddev_input_, stream); } // finally, run the normalize kernel { ScratchpadSnapshot snap(scratch); auto &norm_kernel = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, epsilon has already been included float epsilon = ShouldCalcStdDev() ? 
0 : epsilon_; if (has_scalar_mean_) { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, stddev_gpu, scale_, shift_, epsilon); } } else { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, stddev_gpu, scale_, shift_, epsilon); } } } } } // namespace dali
690553b88d4ad0837679ce9be37dd04e40a3ac4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> /* #include <sys/time.h> #include <sys/resource.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; }*/ __global__ void multiply_no_shared( int global_array[] , int dim, const int c, const int tile_width) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx<dim) global_array[idx]*=c; } __global__ void multiply( int global_array[] , int dim, const int c, const int tile_width) { extern __shared__ int shared_a[]; int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx<dim){ shared_a[idx]=global_array[idx]; shared_a[idx]*=c; global_array[idx]=shared_a[idx]; } } int main(int argc, char *argv[]){ //Measure time clock_t time_begin; // pointers to host & device arrays int *device_array = 0; int *host_array = 0; unsigned int size_array=16; bool verbose=false; int tile_width =16; if(argc == 3){ size_array=atoi(argv[1]) ; tile_width=atoi(argv[2]); } else if(argc==4){ size_array=atoi(argv[1]); tile_width=atoi(argv[2]); verbose=(argv[3][0]=='v'); } // malloc a host array host_array = (int*)malloc( size_array * sizeof(int)); for(int i=0; i<size_array; i++){ host_array[i]=rand()%10; if(verbose) printf("%i\t", host_array[i]); } if(verbose) printf("\n"); // hipMalloc a device array hipMalloc(&device_array,size_array * sizeof(int)); // download and inspect the result on the host: hipMemcpy(device_array, host_array, sizeof(int)*size_array, hipMemcpyHostToDevice); dim3 bloque(tile_width, tile_width); dim3 grid((int)ceil(double((float)size_array)/double(bloque.x)), ceil(double((float)size_array)/double(bloque.y))); printf("%i threads per block, %i vector\n", tile_width*tile_width, size_array); int shared_mem=sizeof(int); time_begin=clock(); //time_begin=dwalltime(); hipLaunchKernelGGL(( multiply_no_shared), 
dim3(grid), dim3(bloque), 0, 0, device_array, size_array , 2, tile_width); hipDeviceSynchronize(); // download and inspect the result on the host: hipMemcpy(host_array, device_array, sizeof(int)*size_array, hipMemcpyDeviceToHost); //printf("GPU time without shared memory: %f seconds\n", dwalltime() - time_begin ); printf("GPU time without shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); for(int i=0; i<size_array; i++){ host_array[i]/=2; } hipMemcpy(device_array, host_array, sizeof(int)*size_array, hipMemcpyHostToDevice); time_begin=clock(); // time_begin=dwalltime(); hipLaunchKernelGGL(( multiply), dim3(grid), dim3(bloque), shared_mem, 0, device_array, size_array , 2, tile_width); hipDeviceSynchronize(); // download and inspect the result on the host: hipMemcpy(host_array, device_array, sizeof(int)*size_array, hipMemcpyDeviceToHost); //printf("GPU time with shared memory: %f seconds\n", dwalltime() - time_begin ); printf("GPU time with shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); if(verbose){ for(int i=0; i<size_array; i++) printf("%i\t", host_array[i]); } // deallocate memory free(host_array); hipFree(device_array); }
690553b88d4ad0837679ce9be37dd04e40a3ac4b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> /* #include <sys/time.h> #include <sys/resource.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; }*/ __global__ void multiply_no_shared( int global_array[] , int dim, const int c, const int tile_width) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx<dim) global_array[idx]*=c; } __global__ void multiply( int global_array[] , int dim, const int c, const int tile_width) { extern __shared__ int shared_a[]; int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx<dim){ shared_a[idx]=global_array[idx]; shared_a[idx]*=c; global_array[idx]=shared_a[idx]; } } int main(int argc, char *argv[]){ //Measure time clock_t time_begin; // pointers to host & device arrays int *device_array = 0; int *host_array = 0; unsigned int size_array=16; bool verbose=false; int tile_width =16; if(argc == 3){ size_array=atoi(argv[1]) ; tile_width=atoi(argv[2]); } else if(argc==4){ size_array=atoi(argv[1]); tile_width=atoi(argv[2]); verbose=(argv[3][0]=='v'); } // malloc a host array host_array = (int*)malloc( size_array * sizeof(int)); for(int i=0; i<size_array; i++){ host_array[i]=rand()%10; if(verbose) printf("%i\t", host_array[i]); } if(verbose) printf("\n"); // cudaMalloc a device array cudaMalloc(&device_array,size_array * sizeof(int)); // download and inspect the result on the host: cudaMemcpy(device_array, host_array, sizeof(int)*size_array, cudaMemcpyHostToDevice); dim3 bloque(tile_width, tile_width); dim3 grid((int)ceil(double((float)size_array)/double(bloque.x)), ceil(double((float)size_array)/double(bloque.y))); printf("%i threads per block, %i vector\n", tile_width*tile_width, size_array); int shared_mem=sizeof(int); time_begin=clock(); //time_begin=dwalltime(); multiply_no_shared<<<grid, bloque>>>(device_array, size_array , 2, tile_width); cudaThreadSynchronize(); // 
download and inspect the result on the host: cudaMemcpy(host_array, device_array, sizeof(int)*size_array, cudaMemcpyDeviceToHost); //printf("GPU time without shared memory: %f seconds\n", dwalltime() - time_begin ); printf("GPU time without shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); for(int i=0; i<size_array; i++){ host_array[i]/=2; } cudaMemcpy(device_array, host_array, sizeof(int)*size_array, cudaMemcpyHostToDevice); time_begin=clock(); // time_begin=dwalltime(); multiply<<<grid, bloque, shared_mem>>>(device_array, size_array , 2, tile_width); cudaThreadSynchronize(); // download and inspect the result on the host: cudaMemcpy(host_array, device_array, sizeof(int)*size_array, cudaMemcpyDeviceToHost); //printf("GPU time with shared memory: %f seconds\n", dwalltime() - time_begin ); printf("GPU time with shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); if(verbose){ for(int i=0; i<size_array; i++) printf("%i\t", host_array[i]); } // deallocate memory free(host_array); cudaFree(device_array); }
66754879b44e70f6a3d9b2ec28dac12eea8b913d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_tools.h" __global__ void reduce_fe_chisqs ( double *chisqs, // Bufor z chisq const size_t * filtered_sizes, // Ilo znaczcych elementw po filtracji const size_t size // Ilo chisq i filtered_sizes ) { // gid0 - numer elementu z chisqs (czyli jednego chisq) const uint gid0 = blockDim.x * blockIdx.x + threadIdx.x; if(gid0 >= size) return; chisqs[gid0] = chisqs[gid0] / ((double)filtered_sizes[gid0] - 1.0); } extern "C" void reduceFeChisq( double *h_chisq, const size_t *h_sizes, const size_t width ) { // allocating kernel data double *d_chisq; size_t *d_sizes; const size_t double_vector_size = width * sizeof(double); const size_t size_t_vector_size = width * sizeof(size_t); checkCudaErrors(hipMalloc((void**)&d_chisq, double_vector_size)); checkCudaErrors(hipMalloc((void**)&d_sizes, size_t_vector_size)); // copying data checkCudaErrors(hipMemcpy(d_chisq, h_chisq, double_vector_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_sizes, h_sizes, size_t_vector_size, hipMemcpyHostToDevice)); // instatiating kernel const size_t threadsPerBlock = BLOCK_DIM; const size_t blocksPerGrid = width / threadsPerBlock; // calling kernel hipLaunchKernelGGL(( reduce_fe_chisqs), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_chisq, d_sizes, width); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //copying memory back checkCudaErrors(hipMemcpy(h_chisq, d_chisq, double_vector_size, hipMemcpyDeviceToHost)); // free memory hipFree(d_chisq); hipFree(d_sizes); }
66754879b44e70f6a3d9b2ec28dac12eea8b913d.cu
#include "cuda_tools.h" __global__ void reduce_fe_chisqs ( double *chisqs, // Bufor z chisq const size_t * filtered_sizes, // Ilość znaczących elementów po filtracji const size_t size // Ilość chisq i filtered_sizes ) { // gid0 - numer elementu z chisqs (czyli jednego chisq) const uint gid0 = blockDim.x * blockIdx.x + threadIdx.x; if(gid0 >= size) return; chisqs[gid0] = chisqs[gid0] / ((double)filtered_sizes[gid0] - 1.0); } extern "C" void reduceFeChisq( double *h_chisq, const size_t *h_sizes, const size_t width ) { // allocating kernel data double *d_chisq; size_t *d_sizes; const size_t double_vector_size = width * sizeof(double); const size_t size_t_vector_size = width * sizeof(size_t); checkCudaErrors(cudaMalloc((void**)&d_chisq, double_vector_size)); checkCudaErrors(cudaMalloc((void**)&d_sizes, size_t_vector_size)); // copying data checkCudaErrors(cudaMemcpy(d_chisq, h_chisq, double_vector_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_sizes, h_sizes, size_t_vector_size, cudaMemcpyHostToDevice)); // instatiating kernel const size_t threadsPerBlock = BLOCK_DIM; const size_t blocksPerGrid = width / threadsPerBlock; // calling kernel reduce_fe_chisqs<<<blocksPerGrid, threadsPerBlock>>>(d_chisq, d_sizes, width); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //copying memory back checkCudaErrors(cudaMemcpy(h_chisq, d_chisq, double_vector_size, cudaMemcpyDeviceToHost)); // free memory cudaFree(d_chisq); cudaFree(d_sizes); }
4b1ee12c1c8a814910cfeb6a52670dc5816891da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020 NVIDIA CORPORATION. * Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. 
*/ #include "coordinate_map_functors.cuh" #include "coordinate_map_gpu.cuh" #include "gpu.cuh" #include "kernel_map.cuh" #include "kernel_map.hpp" #include "sharedmem.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> namespace minkowski { namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void remap_inverse_map(map_type __restrict__ map, // coordinate_type const *__restrict__ coordinates, // index_type *__restrict__ inverse_map, // size_type const num_threads, // size_type const coordinate_size // ) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { auto result = map.find( coordinate<coordinate_type>{&coordinates[x * coordinate_size]}); inverse_map[x] = result->second; } } template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void insert_and_map_kernel(map_type __restrict__ map, // coordinate_type const *__restrict__ coordinates, // index_type *__restrict__ valid_map_index, // index_type *__restrict__ valid_row_index, // size_type const num_threads, // size_type const coordinate_size, // index_type const unused_key) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { // Returns pair<iterator, (bool)insert_success> auto const result = map.insert(thrust::make_pair( coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x)); // auto test = &coordinates[x * coordinate_size]; if (result.second) { valid_row_index[x] = x; // success map index. remove failed insertion with success. 
valid_map_index[x] = result.first.offset(); } else { valid_map_index[x] = unused_key; } } } } // namespace detail /* * Field Map */ namespace detail { template <typename coordinate_field_type, typename coordinate_int_type, typename index_type, bool stride_one> __global__ void quantize_coordinates_kernel( coordinate_field_type const *__restrict__ p_tfield, // coordinate_int_type *__restrict__ p_stensor, // index_type const *__restrict__ p_tensor_stride, // index_type const num_threads, index_type const coordinate_size) { // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type) // + THREADS * coordinate_size * sizeof(coordinate_type) extern __shared__ index_type sh_tensor_stride[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (stride_one) { if (x < num_threads) { if (x % coordinate_size == 0) p_stensor[x] = lrint(p_tfield[x]); else p_stensor[x] = floor(p_tfield[x]); } } else { for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = p_tensor_stride[i]; } __syncthreads(); if (x < num_threads) { // batch index if (x % coordinate_size == 0) p_stensor[x] = lrint(p_tfield[x]); else { index_type curr_tensor_stride = sh_tensor_stride[((x - 1) % coordinate_size)]; p_stensor[x] = floor(p_tfield[x] / curr_tensor_stride) * curr_tensor_stride; } } } } } // namespace detail template <typename coordinate_field_type, typename coordinate_int_type, template <typename T> class TemplatedAllocator> void CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>:: quantize_coordinates(coordinate_int_type *d_dst_coordinates, stride_type const &tensor_stride) const { int64_t const stride_prod = std::accumulate( tensor_stride.begin(), tensor_stride.end(), 1, std::multiplies<>()); // Copy tensor_stride to device index_type *d_tensor_stride = reinterpret_cast<index_type *>( m_byte_allocator.allocate(m_coordinate_size * sizeof(index_type))); 
CUDA_CHECK(hipMemcpy( d_tensor_stride, // dst tensor_stride.data(), // first element of the dereferenced iter. sizeof(index_type) * m_coordinate_size, // bytes hipMemcpyHostToDevice)); size_type const num_threads = size() * m_coordinate_size; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); if (stride_prod == 1) { hipLaunchKernelGGL(( detail::quantize_coordinates_kernel<coordinate_field_type, coordinate_int_type, index_type, true>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), m_coordinate_size * sizeof(index_type), 0, const_coordinate_data(), d_dst_coordinates, d_tensor_stride, num_threads, m_coordinate_size); } else { hipLaunchKernelGGL(( detail::quantize_coordinates_kernel<coordinate_field_type, coordinate_int_type, index_type, false>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), m_coordinate_size * sizeof(index_type), 0, const_coordinate_data(), d_dst_coordinates, d_tensor_stride, num_threads, m_coordinate_size); } } /* * @brief Given a key iterator begin-end pair and a value iterator begin-end * pair, insert all elements. * * @note The key and value iterators can be 1) pointers, 2) coordinate or vector * iterators. * * @return none */ template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <bool remap> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) { size_type const N = key_last - key_first; LOG_DEBUG("key iterator length", N); if (N == 0) { m_size = 0; return; } m_valid_row_index.allocate(N); m_valid_map_index.allocate(N); // Copy the coordinates to m_coordinate base_type::reserve(N); CUDA_CHECK( hipMemcpy(coordinate_data(), // dst key_first->data(), // first element of the dereferenced iter. 
sizeof(coordinate_type) * N * m_coordinate_size, // bytes hipMemcpyDeviceToDevice)); CUDA_CHECK(hipDeviceSynchronize()); LOG_DEBUG("Reserved and copiedm", N, "x", m_coordinate_size, "coordinates"); // compute cuda kernel call params size_type const num_threads = N; LOG_DEBUG("nm_threads", num_threads); size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); LOG_DEBUG("nm_blocks", num_blocks); index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("unused_key", unused_key); hipLaunchKernelGGL(( detail::insert_and_map_kernel<coordinate_type, size_type, index_type, map_type>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *m_map, // const_coordinate_data(), // m_valid_map_index.data(), // m_valid_row_index.data(), // num_threads, m_coordinate_size, unused_key); CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("Map size:", m_map->size()); // Valid row index auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(m_valid_map_index.begin(), m_valid_row_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, valid_begin, thrust::make_zip_iterator(thrust::make_tuple( m_valid_map_index.end(), m_valid_row_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; m_valid_row_index.resize(number_of_valid); m_valid_map_index.resize(number_of_valid); m_size = number_of_valid; LOG_DEBUG("Number of successful insertion", m_size); if (remap // When remapping && number_of_valid != N // when the # of inserted items differ from the # // of successful insertions ) { m_inverse_row_index.allocate(N); thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(count_begin, count_begin + number_of_valid, detail::update_value_with_offset<index_type, map_type>{ *m_map, m_valid_map_index.data()}); size_type const num_threads = N; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); hipLaunchKernelGGL(( detail::remap_inverse_map<coordinate_type, size_type, index_type, map_type>) 
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *m_map, // const_coordinate_data(), // m_inverse_row_index.data(), // num_threads, m_coordinate_size); LOG_DEBUG("Remapping finished"); } } // namespace minkowski template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <bool remap> std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>, gpu_storage<default_types::index_type, TemplatedAllocator<char>>> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert_and_map( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) { LOG_DEBUG("insert_and_map"); insert<remap>(key_first, key_last); return std::make_pair(m_valid_row_index, m_inverse_row_index); } template <typename coordinate_type, template <typename T> class TemplatedAllocator> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>:: initialize_valid_indices(size_t const N_unique) { m_valid_row_index.resize(N_unique); m_valid_map_index.resize(N_unique); m_size = N_unique; // Insert coordinates auto insert = detail::insert_coordinate<coordinate_type, map_type, index_type *>{ *m_map, // map const_coordinate_data(), // coordinates, m_valid_row_index.data(), // valid row m_valid_map_index.data(), // iter offset m_coordinate_size}; thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert); } /* * @brief given a key iterator begin-end pair find all valid keys and its * index. * * @return a pair of (valid index, query value) vectors. 
*/ template <typename coordinate_type, template <typename T> class TemplatedAllocator> std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>, gpu_storage<default_types::index_type, TemplatedAllocator<char>>> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::find( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) const { size_type N = key_last - key_first; LOG_DEBUG(N, "queries for find."); auto const find_functor = detail::find_coordinate<coordinate_type, map_type>( *m_map, key_first->data(), m_unused_element, m_coordinate_size); LOG_DEBUG("Find functor initialized."); auto const invalid_functor = detail::is_unused_pair<coordinate_type, mapped_type>(m_unused_element); LOG_DEBUG("Valid functor initialized."); thrust::counting_iterator<index_type> index{0}; gpu_storage<index_type, byte_allocator_type> input_index(N); gpu_storage<index_type, byte_allocator_type> results(N); LOG_DEBUG("Initialized functors."); thrust::sequence(thrust::device, input_index.begin(), input_index.end()); thrust::transform(thrust::device, index, index + N, results.begin(), find_functor); size_type const number_of_valid = thrust::remove_if(thrust::device, thrust::make_zip_iterator(thrust::make_tuple( input_index.begin(), results.begin())), thrust::make_zip_iterator(thrust::make_tuple( input_index.end(), results.end())), invalid_functor) - thrust::make_zip_iterator( thrust::make_tuple(input_index.begin(), results.begin())); LOG_DEBUG("Number of valid", number_of_valid); input_index.resize(number_of_valid); results.resize(number_of_valid); return std::make_pair(input_index, results); } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type> __global__ void stride_copy(coordinate_type const *__restrict__ src_coordinates, // index_type const *__restrict__ src_valid_row_index, // index_type const *__restrict__ stride, // coordinate_type *__restrict__ dst_coordinates, // size_type 
const num_threads, size_type const coordinate_size) { extern __shared__ size_type sh_stride[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) sh_stride[i] = stride[i]; __syncthreads(); if (x < num_threads) { const index_type src_start = src_valid_row_index[x] * coordinate_size; const index_type dst_start = x * coordinate_size; dst_coordinates[dst_start] = src_coordinates[src_start]; for (index_type j = 1; j < coordinate_size; ++j) { dst_coordinates[dst_start + j] = (__float2int_rd( __fdiv_rd(src_coordinates[src_start + j], sh_stride[j - 1]))) * sh_stride[j - 1]; // (__double2int_rd( // __ddiv_rn(src_coordinates[src_start + j], sh_stride[j - 1]))) * // sh_stride[j - 1]; } } } } // namespace detail /* * @brief given a key iterator begin-end pair find all valid keys and its * index. * * @return a pair of (valid index, query value) vectors. */ template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride( stride_type const &stride) const { // Over estimate the reserve size to be size(); size_type const N = size(); LOG_DEBUG("Strided map with kernel stride:", stride); self_type stride_map( N, m_coordinate_size, m_hashtable_occupancy, detail::stride_tensor_stride(base_type::m_tensor_stride, stride), m_map_allocator, base_type::m_byte_allocator); // stride coordinates size_type const num_threads = N; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); hipLaunchKernelGGL(( detail::stride_copy<coordinate_type, size_type, index_type>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), m_coordinate_size * sizeof(size_type), 0, const_coordinate_data(), // m_valid_row_index.cbegin(), // m_device_tensor_stride.cbegin(), // stride_map.coordinate_data(), // num_threads, m_coordinate_size); LOG_DEBUG("Stride copy done."); auto 
&stride_valid_row_index = stride_map.m_valid_row_index; auto &stride_valid_map_index = stride_map.m_valid_map_index; stride_valid_row_index.resize(N); // row indices stride_valid_map_index.resize(N); // map offset // Insert coordinates index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("unused_key", unused_key); hipLaunchKernelGGL(( detail::insert_and_map_kernel<coordinate_type, size_type, index_type, map_type>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *stride_map.m_map, // stride_map.const_coordinate_data(), // stride_valid_map_index.data(), // stride_valid_row_index.data(), // num_threads, m_coordinate_size, unused_key); CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("Stride map size:", m_map->size()); // Valid row index auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(stride_valid_map_index.begin(), // stride_valid_row_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator( thrust::make_tuple(stride_valid_map_index.end(), // stride_valid_row_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; stride_valid_row_index.resize(number_of_valid); stride_valid_map_index.resize(number_of_valid); stride_map.m_size = number_of_valid; LOG_DEBUG("Reduced to", number_of_valid); // remap values thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(count_begin, count_begin + number_of_valid, detail::update_value_with_offset<index_type, map_type>{ *stride_map.m_map, stride_map.m_valid_map_index.data()}); LOG_DEBUG("Stride remap done"); return stride_map; } namespace detail { template <typename coordinate_type, typename index_type> __device__ bool is_coordinate_aligned(coordinate_type *point, index_type *out_tensor_stride, uint32_t const size) { for (uint32_t i = 0; i < size - 1; ++i) { if (point[i + 1] % out_tensor_stride[i] != 0) return false; } return true; } template <typename coordinate_type, // typename size_type, 
// typename index_type, // typename map_type> __global__ void kernel_region_insert( size_type const num_threads, // map_type __restrict__ out_map, // coordinate_type const *const __restrict__ p_in_coordinates, // index_type const *const __restrict__ in_valid_row_index, // coordinate_type *__restrict__ p_out_coordinates, // index_type *__restrict__ out_valid_row_index, // index_type *__restrict__ out_valid_map_index, // gpu_kernel_region<coordinate_type> kernel, // size_type const *const __restrict__ out_tensor_stride, // index_type const unused_key) { // extern __shared__ coordinate_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; size_type const coordinate_size = kernel.coordinate_size(); size_type const volume = kernel.volume(); // clang-format off size_type *sh_size = reinterpret_cast<size_type *>(sh_all); size_type *sh_tensor_stride = sh_size; size_type *sh_kernel_size = sh_tensor_stride + coordinate_size; size_type *sh_dilation = sh_kernel_size + coordinate_size; size_type *sh_out_tensor_stride = sh_dilation + coordinate_size; coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_out_tensor_stride + coordinate_size); coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size; // clang-format on for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = kernel.tensor_stride()[i]; sh_kernel_size[i] = kernel.kernel_size()[i]; sh_dilation[i] = kernel.dilation()[i]; sh_out_tensor_stride[i] = out_tensor_stride[i]; } __syncthreads(); auto sh_kernel = gpu_kernel_region<coordinate_type>( kernel, sh_tensor_stride, sh_kernel_size, sh_dilation); coordinate<coordinate_type> curr_coordinate(sh_tmp); if (x < num_threads) { // iterate over values index_type out_index = x * volume; // set bounds for the valid keys for (uint32_t kernel_ind = 0; kernel_ind < volume; ++kernel_ind) { sh_kernel.coordinate_at( kernel_ind, &p_in_coordinates[in_valid_row_index[x] * 
coordinate_size], sh_tmp); // Creating generative conv transpose if (kernel.is_transpose()) { // initialize out coordinate for (uint32_t i = 0; i < coordinate_size; ++i) p_out_coordinates[out_index * coordinate_size + i] = curr_coordinate[i]; auto const result = out_map.insert(thrust::make_pair( coordinate<coordinate_type>{ &p_out_coordinates[out_index * coordinate_size]}, out_index)); if (result.second) { // row index in the out_coordinates out_valid_row_index[out_index] = out_index; // offset in the coordinate map out_valid_map_index[out_index] = result.first.offset(); } else { out_valid_row_index[out_index] = unused_key; } ++out_index; } else { // skip if the coordinate is not aligned if (!is_coordinate_aligned(sh_tmp, sh_out_tensor_stride, coordinate_size)) { out_valid_row_index[out_index] = unused_key; ++out_index; } else { // initialize out coordinate for (uint32_t i = 0; i < coordinate_size; ++i) p_out_coordinates[out_index * coordinate_size + i] = curr_coordinate[i]; auto const result = out_map.insert(thrust::make_pair( coordinate<coordinate_type>{ &p_out_coordinates[out_index * coordinate_size]}, out_index)); if (result.second) { // row index in the out_coordinates out_valid_row_index[out_index] = out_index; // offset in the coordinate map out_valid_map_index[out_index] = result.first.offset(); } else { out_valid_row_index[out_index] = unused_key; } ++out_index; } } } } } } // namespace detail /* * @brief generate a region strided coordinate map * * @return a gpu_coordinate_map */ template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_region( cpu_kernel_region<coordinate_type> &kernel, stride_type const &out_tensor_stride) const { ASSERT(m_coordinate_size == kernel.coordinate_size(), "Invalid kernel coordinate_size"); gpu_kernel_region<coordinate_type> gpu_kernel(kernel.to_gpu()); // Over estimate the reserve size 
to be size(); size_type const N_in = size(); size_type const N_out = N_in * kernel.volume(); LOG_DEBUG("Stride region out tensor stride:", out_tensor_stride, "with capacity:", N_out); self_type stride_map(N_out, m_coordinate_size, m_hashtable_occupancy, out_tensor_stride, m_map_allocator, base_type::m_byte_allocator); index_storage_type d_out_tensor_stride(out_tensor_stride); auto &out_valid_row_index = stride_map.m_valid_row_index; auto &out_valid_map_index = stride_map.m_valid_map_index; out_valid_row_index.resize(N_out); out_valid_map_index.resize(N_out); index_type const unused_key = std::numeric_limits<index_type>::max(); // (THREAD * D + 3 * D) * 4 uint32_t const shared_memory_size_in_bytes = 4 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation CUDA_NUM_THREADS * m_coordinate_size * sizeof(coordinate_type); // tmp hipLaunchKernelGGL(( detail::kernel_region_insert<coordinate_type, size_type, index_type, map_type>) , dim3(GET_BLOCKS(N_in, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), shared_memory_size_in_bytes, 0, N_in, // *stride_map.m_map, // const_coordinate_data(), // m_valid_row_index.cbegin(), // stride_map.coordinate_data(), // out_valid_row_index.data(), // out_valid_map_index.data(), // gpu_kernel, // d_out_tensor_stride.cbegin(), // unused_key); // CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("kernel_region_insert done"); // LOG_DEBUG("valid row index", out_valid_row_index); // LOG_DEBUG("valid map offset", out_valid_map_index); // remove unused_keys auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(out_valid_row_index.begin(), // out_valid_map_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator( thrust::make_tuple(out_valid_row_index.end(), // out_valid_map_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; out_valid_row_index.resize(number_of_valid); out_valid_map_index.resize(number_of_valid); stride_map.m_size = 
number_of_valid; LOG_DEBUG("Reduced to", number_of_valid); // remap values thrust::counting_iterator<index_type> count_begin{0}; thrust::for_each(count_begin, count_begin + number_of_valid, detail::update_value_with_offset<index_type, map_type>{ *stride_map.m_map, out_valid_map_index.data()}); LOG_DEBUG("Stride remap done"); return stride_map; } namespace detail { template <typename dst_coordinate_type, typename src_coordinate_type, typename size_type, typename index_type, bool stride_src> __global__ void copy_column_with_valid( dst_coordinate_type *__restrict__ dst_coordinates, // size_type const num_threads, // src_coordinate_type const *__restrict__ src_coordinates, // index_type const *__restrict__ src_valid_row_index, // size_type const coordinate_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { if (stride_src) dst_coordinates[x] = src_coordinates[src_valid_row_index[x] * coordinate_size]; else dst_coordinates[x * coordinate_size] = src_coordinates[src_valid_row_index[x]]; } } template <typename dst_coordinate_type, typename src_coordinate_type, typename size_type, bool stride_src> __global__ void copy_column(dst_coordinate_type *__restrict__ dst_coordinates, // size_type const num_threads, // src_coordinate_type const *__restrict__ src_coordinates, // size_type const coordinate_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { if (stride_src) dst_coordinates[x] = src_coordinates[x * coordinate_size]; else dst_coordinates[x * coordinate_size] = src_coordinates[x]; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin() const { size_type const N = size(); LOG_DEBUG("Origin map from in map size:", N); // tensor stride is set to {0,..., 0} for 
the origin map. stride_type origin_tensor_stride(m_coordinate_size - 1); std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(), [](auto &i) { i = 0; }); // thrust unique for unique batch index coordinate_type *d_batch_indices = reinterpret_cast<coordinate_type *>( m_byte_allocator.allocate(N * sizeof(coordinate_type))); hipLaunchKernelGGL(( detail::copy_column_with_valid<coordinate_type, coordinate_type, size_type, index_type, true>) , dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, d_batch_indices, N, const_coordinate_data(), m_valid_row_index.cbegin(), m_coordinate_size); #ifdef DEBUG CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("copied batch indices"); #endif // Sort and unique thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N); #ifdef DEBUG CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("sorted batch indices"); #endif auto d_batch_indices_end = thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N); size_type const N_unique = d_batch_indices_end - d_batch_indices; #ifdef DEBUG size_t Nsize = std::min<int>(N_unique, 100); std::vector<coordinate_type> tmp(Nsize); CUDA_CHECK(hipMemcpy(tmp.data(), d_batch_indices, Nsize * sizeof(coordinate_type), hipMemcpyDeviceToHost)); LOG_DEBUG("sort and unique batch", tmp); CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("unique done"); #endif // Create origin map LOG_DEBUG("Origin map with size:", N_unique, " tensor stride:", origin_tensor_stride); self_type origin_map(N_unique, m_coordinate_size, m_hashtable_occupancy, origin_tensor_stride, m_map_allocator, base_type::m_byte_allocator); CUDA_CHECK( hipMemset(origin_map.coordinate_data(), 0, N_unique * m_coordinate_size * sizeof(coordinate_type))); hipLaunchKernelGGL(( detail::copy_column<coordinate_type, coordinate_type, size_type, false>) , dim3(GET_BLOCKS(N_unique, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, origin_map.coordinate_data(), N_unique, d_batch_indices, m_coordinate_size); #ifdef DEBUG 
CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("copied batch indices to the origin_map"); #endif auto &origin_valid_row_index = origin_map.m_valid_row_index; auto &origin_valid_map_index = origin_map.m_valid_map_index; origin_valid_row_index.resize(N_unique); origin_valid_map_index.resize(N_unique); origin_map.m_size = N_unique; // Insert coordinates auto insert = detail::insert_coordinate<coordinate_type, map_type, index_type *>{ *origin_map.m_map, // map origin_map.const_coordinate_data(), // coordinates, origin_valid_row_index.data(), // valid row origin_valid_map_index.data(), // iter offset m_coordinate_size}; thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert); #ifdef DEBUG CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("origin map insertion"); #endif m_byte_allocator.deallocate((char *)d_batch_indices, N * sizeof(coordinate_type)); return origin_map; } template <typename coordinate_type, typename coordinate_int_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> CoordinateFieldMapGPU<coordinate_type, coordinate_int_type, TemplatedAllocator>::origin() const { size_type const N = size(); LOG_DEBUG("Origin map from in map size:", N); // tensor stride is set to {0,..., 0} for the origin map. 
stride_type origin_tensor_stride(m_coordinate_size - 1); std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(), [](auto &i) { i = 0; }); // thrust unique for unique batch index coordinate_int_type *d_batch_indices = reinterpret_cast<coordinate_int_type *>( m_byte_allocator.allocate(N * sizeof(coordinate_int_type))); hipLaunchKernelGGL(( detail::copy_column<coordinate_int_type, coordinate_type, size_type, true>) , dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, d_batch_indices, N, const_coordinate_data(), m_coordinate_size); // Sort and unique thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N); auto d_batch_indices_end = thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N); size_type const N_unique = d_batch_indices_end - d_batch_indices; // Create origin map LOG_DEBUG("Origin map with size:", N_unique, " tensor stride:", origin_tensor_stride); CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> origin_map( N_unique, m_coordinate_size, 50, origin_tensor_stride); CUDA_CHECK( hipMemset(origin_map.coordinate_data(), 0, N_unique * m_coordinate_size * sizeof(coordinate_int_type))); hipLaunchKernelGGL(( detail::copy_column<coordinate_int_type, coordinate_int_type, size_type, false>) , dim3(GET_BLOCKS(N_unique, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, origin_map.coordinate_data(), N_unique, d_batch_indices, m_coordinate_size); m_byte_allocator.deallocate((char *)d_batch_indices, N * sizeof(coordinate_type)); origin_map.initialize_valid_indices(N_unique); return origin_map; } namespace detail { template <typename coordinate_field_type, // typename coordinate_int_type, // typename size_type, // typename index_type, // typename map_type> __global__ void origin_field_map_kernel( size_type const num_threads, // coordinate_field_type const *__restrict__ d_field_coords, // map_type const __restrict__ origin_map, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, // 
index_type *__restrict__ p_kernels, // size_type const coordinate_size) { extern __shared__ coordinate_int_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; // clang-format off coordinate_int_type *sh_tmp = sh_all + tx * coordinate_size; // clang-format on if (x < num_threads) for (index_type i = 0; i < coordinate_size; ++i) sh_tmp[i] = 0; __syncthreads(); if (x < num_threads) { sh_tmp[0] = coordinate_int_type(lroundf(d_field_coords[x * coordinate_size])); auto origin_iter = origin_map.find(coordinate<coordinate_int_type>(sh_tmp)); auto out_index = origin_iter->second; p_in_maps[x] = x; p_out_maps[x] = out_index; // origin_map row index // For kernel_map decompose() p_kernels[x] = out_index; } } } // namespace detail template <typename coordinate_field_type, typename coordinate_int_type, template <typename T> class TemplatedAllocator> CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>::kernel_map_type CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>:: origin_map(CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> const &origin_map, uint32_t thread_dim) const { ASSERT(std::all_of(origin_map.get_tensor_stride().begin(), origin_map.get_tensor_stride().end(), [](auto const &i) { return i == 0; }), "Invalid origin tensor stride", origin_map.get_tensor_stride()); // reserve size(); size_type const in_size = size(); LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size()); // (THREAD * D) * 4 uint32_t const shared_memory_size_in_bytes = thread_dim * m_coordinate_size * sizeof(coordinate_int_type); // tmp size_type const num_threads = in_size; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("origin_map num block", num_blocks); LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("origin_map threads dim", thread_dim); LOG_DEBUG("origin_map num threads", num_threads); 
kernel_map_type kernel_map(in_size, base_type::m_byte_allocator); CUDA_CHECK(hipStreamSynchronize(0)); LOG_DEBUG("Allocated kernel_map."); hipLaunchKernelGGL(( detail::origin_field_map_kernel<coordinate_field_type, coordinate_int_type, size_type, index_type, int_hash_map_type>) , dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0, num_threads, // const_coordinate_data(), // origin_map.const_hash_map(), // kernel_map.in_maps.begin(), // kernel_map.out_maps.begin(), // kernel_map.kernels.begin(), // m_coordinate_size); CUDA_CHECK(hipStreamSynchronize(0)); kernel_map.decompose(); LOG_DEBUG("origin map decomposed"); return kernel_map; } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void prune_copy_and_insert( size_type const num_threads, // size_type const coordinate_size, // index_type const unused_map_offset, // index_type const *const __restrict__ in_valid_row_index, // coordinate_type const *const __restrict__ in_coordinates, // bool const *const __restrict__ keep_begin, // index_type const *const __restrict__ inclusive_scan_keep, // map_type __restrict__ out_map, // coordinate_type *__restrict__ out_coordinates, // index_type *__restrict__ out_valid_row_index, // index_type *__restrict__ out_valid_map_offset // ) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { if (!keep_begin[x]) { out_valid_map_offset[x] = unused_map_offset; } else { // If keep, auto out_row_index = (x < 1) ? 
0 : inclusive_scan_keep[x - 1]; coordinate_type const *curr_in_coord = &in_coordinates[in_valid_row_index[x] * coordinate_size]; coordinate_type *curr_out_coord = &out_coordinates[out_row_index * coordinate_size]; for (index_type i = 0; i < coordinate_size; ++i) curr_out_coord[i] = curr_in_coord[i]; // insert to the out_map auto coord = coordinate<coordinate_type>{curr_out_coord}; // remap the value in the next kernel call auto result = out_map.insert(thrust::make_pair(coord, 0)); out_valid_row_index[x] = out_row_index; if (result.second) out_valid_map_offset[x] = result.first.offset(); else out_valid_map_offset[x] = unused_map_offset; } } } template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void remap(size_type const num_threads, // map_type const __restrict__ out_map, // index_type *__restrict__ out_valid_map_offset // ) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { auto &pair = out_map.data()[out_valid_map_offset[x]]; pair.second = x; } } template <typename Dtype, typename Stype> __global__ void typed_copy(uint32_t const num_threads, // Dtype *__restrict__ dst, // Stype const *__restrict__ src // ) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { dst[x] = src[x]; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::prune( bool const *keep_begin, bool const *keep_end) const { size_type const N = size(); ASSERT(N == keep_end - keep_begin, "Invalid keep size"); LOG_DEBUG("Prune size:", N); // exclusive sum for coordinate copy. 
  auto const inclusive_scan_size = N * sizeof(index_type);
  index_type *d_inclusive_scan =
      (index_type *)m_byte_allocator.allocate(inclusive_scan_size);
  // bool -> index_type
  hipLaunchKernelGGL((detail::typed_copy),
                     dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)),
                     dim3(CUDA_NUM_THREADS), 0, 0, N, d_inclusive_scan,
                     keep_begin);
  CUDA_CHECK(hipStreamSynchronize(0));
  thrust::inclusive_scan(thrust::device, d_inclusive_scan,
                         d_inclusive_scan + N, d_inclusive_scan);
  index_type N_pruned;
  // The last element of the inclusive scan is the number of kept rows.
  CUDA_CHECK(hipMemcpy(&N_pruned, d_inclusive_scan + N - 1,
                       sizeof(index_type), hipMemcpyDeviceToHost));
  LOG_DEBUG("Pruned N:", N_pruned);

  // create a coordinate_map
  self_type pruned_map(N, m_coordinate_size, m_hashtable_occupancy,
                       base_type::m_tensor_stride, m_map_allocator,
                       base_type::m_byte_allocator);

  // Copy and insert kernel that first checks keep[i] is true and insert at
  // inclusive_scan[i - 1].
  auto &out_valid_map_offset = pruned_map.m_valid_map_index;
  auto &out_valid_row_index = pruned_map.m_valid_row_index;
  out_valid_map_offset.resize(N);
  out_valid_row_index.resize(N);

  index_type const unused_map_offset = std::numeric_limits<index_type>::max();
  hipLaunchKernelGGL(
      (detail::prune_copy_and_insert<coordinate_type, size_type, index_type,
                                     map_type>),
      dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, N,
      m_coordinate_size, unused_map_offset, m_valid_row_index.cbegin(),
      const_coordinate_data(), keep_begin, d_inclusive_scan,
      *(pruned_map.m_map), pruned_map.coordinate_data(),
      out_valid_row_index.data(), out_valid_map_offset.data());
  CUDA_CHECK(hipStreamSynchronize(0));
  LOG_DEBUG("Pruned hash map size:", pruned_map.size());

  // Remove not inserted rows
  auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(
      out_valid_map_offset.begin(), out_valid_row_index.begin()));
  size_type const number_of_valid =
      thrust::remove_if(
          thrust::device, valid_begin,
          thrust::make_zip_iterator(thrust::make_tuple(
              out_valid_map_offset.end(), out_valid_row_index.end())),
          detail::is_first<index_type>(unused_map_offset)) -
      valid_begin;
  LOG_DEBUG("number of valid rows:", number_of_valid);
  out_valid_map_offset.resize(number_of_valid);
  out_valid_row_index.resize(number_of_valid);
  pruned_map.m_size = number_of_valid;

  // remap the final map values
  hipLaunchKernelGGL(
      (detail::remap<coordinate_type, size_type, index_type, map_type>),
      dim3(GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS)),
      dim3(CUDA_NUM_THREADS), 0, 0, number_of_valid, *(pruned_map.m_map),
      out_valid_map_offset.data());
  CUDA_CHECK(hipStreamSynchronize(0));

  m_byte_allocator.deallocate((char *)d_inclusive_scan, inclusive_scan_size);
  return pruned_map;
}

// Merge
namespace detail {

// Copies the coordinate stored at hash-map offset map_offsets[x] into the
// dense coordinates buffer at the row given by the entry's mapped value.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
copy_coordinates_by_offset(map_type __restrict__ map,                  //
                           coordinate_type *__restrict__ coordinates,  //
                           index_type const *__restrict__ map_offsets, //
                           size_type const num_threads,                //
                           size_type const coordinate_size             //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;
  if (x < num_threads) {
    typename map_type::value_type const *p_value = map.data() + map_offsets[x];
    // Compute Capabilities 3.5 or newer
    coordinate_type *dst_coordinate =
        coordinates + p_value->second * coordinate_size;
    for (index_type i = 0; i < coordinate_size; ++i)
      dst_coordinate[i] = p_value->first[i];
  }
}

// Gathers the rows listed in valid_row from in_coordinates into a contiguous
// out_coordinates buffer.  One thread per coordinate ELEMENT, i.e.
// num_threads == number_of_rows * coordinate_size.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void copy_coordinates_by_valid_row(
    // map_type __restrict__ map,                       //
    coordinate_type const *__restrict__ in_coordinates, //
    coordinate_type *__restrict__ out_coordinates,      //
    index_type const *__restrict__ valid_row,           //
    size_type const num_threads,                        //
    size_type const coordinate_size                     //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;
  if (x < num_threads) {
    // Compute Capabilities 3.5 or newer
    index_type const
        row_index = x / coordinate_size;
    index_type const col_index = x % coordinate_size;
    out_coordinates[row_index * coordinate_size + col_index] =
        in_coordinates[valid_row[row_index] * coordinate_size + col_index];
  }
}

// Inserts each of the num_threads coordinates into `map` with value x and
// records the global row index (x + coordinate_row_offset) and the hash-map
// offset of the inserted entry; failed (duplicate) insertions record
// unused_key in valid_map_index.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void insert_and_map_kernel_with_offset(
    map_type __restrict__ map,                       //
    coordinate_type const *__restrict__ coordinates, //
    index_type const coordinate_row_offset,          //
    index_type *__restrict__ valid_map_index,        //
    index_type *__restrict__ valid_row_index,        //
    size_type const num_threads,                     //
    size_type const coordinate_size,                 //
    index_type const unused_key) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;
  if (x < num_threads) {
    // m_map.insert(pair);
    // Returns pair<iterator, (bool)insert_success>
    auto const result = map.insert(thrust::make_pair(
        coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));

    if (result.second) {
      valid_row_index[x] = x + coordinate_row_offset;
      // success map index. remove failed insertion with success.
valid_map_index[x] = result.first.offset(); } else { valid_map_index[x] = unused_key; } } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::merge( std::vector<std::reference_wrapper<self_type>> const &maps) const { // reserve size size_t all_size = std::accumulate( maps.begin(), maps.end(), 0, [](size_t sum, const self_type &map) { return sum + map.size(); }); LOG_DEBUG("Out merge map capacity:", all_size); self_type merged_map(all_size, m_coordinate_size, m_hashtable_occupancy, base_type::m_tensor_stride, m_map_allocator, base_type::m_byte_allocator); merged_map.m_valid_row_index.resize(all_size); merged_map.m_valid_map_index.resize(all_size); // Copy valid coordinates to the merged map coordinate_type *curr_coordinates = merged_map.coordinate_data(); index_type *curr_valid_map_offset = merged_map.m_valid_map_index.data(); index_type *curr_valid_row_index = merged_map.m_valid_row_index.data(); index_type const unused_key = std::numeric_limits<index_type>::max(); index_type row_offset{0}; for (self_type const &map : maps) { size_type const num_threads = map.size(); if (num_threads == 0) continue; size_type const num_blocks = GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS); LOG_DEBUG("Current merge map size:", num_threads); hipLaunchKernelGGL(( detail::copy_coordinates_by_valid_row<coordinate_type, size_type, index_type, map_type>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, map.const_coordinate_data(), // curr_coordinates, // map.m_valid_row_index.cdata(), // num_threads * m_coordinate_size, // m_coordinate_size); hipLaunchKernelGGL(( detail::insert_and_map_kernel_with_offset<coordinate_type, size_type, index_type, map_type>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *(merged_map.m_map), curr_coordinates, // row_offset, // curr_valid_map_offset, // curr_valid_row_index, // 
num_threads, m_coordinate_size, unused_key); CUDA_CHECK(hipStreamSynchronize(0)); curr_coordinates += num_threads * m_coordinate_size; curr_valid_map_offset += num_threads; curr_valid_row_index += num_threads; row_offset += num_threads; } // Remove invalid maps auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(merged_map.m_valid_map_index.begin(), merged_map.m_valid_row_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, valid_begin, thrust::make_zip_iterator(thrust::make_tuple( merged_map.m_valid_map_index.end(), merged_map.m_valid_row_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; // remap the final map row index and the map offset hipLaunchKernelGGL(( detail::remap<coordinate_type, size_type, index_type, map_type>) , dim3(GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0, number_of_valid, *(merged_map.m_map), merged_map.m_valid_map_index.data()); merged_map.m_valid_row_index.resize(number_of_valid); merged_map.m_valid_map_index.resize(number_of_valid); merged_map.m_size = number_of_valid; return merged_map; } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void count_kernel(map_type const __restrict__ in_map, // map_type const __restrict__ out_map, // index_type const *const __restrict__ out_valid_map_index, // size_type const num_threads, // gpu_kernel_region<coordinate_type> kernel, // index_type *__restrict__ p_count_per_thread) { extern __shared__ coordinate_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; size_type const coordinate_size = kernel.coordinate_size(); size_type const volume = kernel.volume(); // clang-format off size_type *sh_size = reinterpret_cast<size_type *>(sh_all); size_type *sh_tensor_stride = sh_size; size_type *sh_kernel_size = sh_tensor_stride + coordinate_size; size_type *sh_dilation = 
                        sh_kernel_size + coordinate_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  auto const equal = out_map.get_key_equal();

  // kernel_maps
  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
  }
  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  coordinate<coordinate_type> point(sh_tmp);
  auto const unused_key = out_map.get_unused_key();
  if (x < num_threads) {
    size_type count = 0;
    typename map_type::value_type const &out_value =
        out_map.data()[out_valid_map_index[x]];
    // valid_index guarantees that it contains a valid value
    if (!equal(out_value.first, unused_key)) {
      for (auto kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
        // coordinate_at writes into sh_tmp, which `point` wraps.
        sh_kernel.coordinate_at(kernel_ind, out_value.first.data(), sh_tmp);
        if (in_map.find(point) != in_map.end()) {
          ++count;
        }
      }
    }
    p_count_per_thread[x] = count;
  }
}

// Second pass after count_kernel: writes (kernel_index, in_row, out_row)
// triplets into preallocated arrays, each thread starting at the
// exclusive-scan offset derived from inclusive_count_cumsum_per_thread.
// Same dynamic shared-memory layout as count_kernel.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void preallocated_kernel_map_iteration(
    map_type const __restrict__ in_map,                       //
    map_type const __restrict__ out_map,                      //
    index_type const *const __restrict__ out_valid_map_index, //
    size_type const num_threads,                              //
    gpu_kernel_region<coordinate_type> kernel,                //
    index_type const *const __restrict__
        inclusive_count_cumsum_per_thread, //
    index_type *__restrict__ p_kernels,    //
    index_type *__restrict__ p_in_maps,    //
    index_type *__restrict__ p_out_maps) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_tensor_stride = sh_size;
  size_type *sh_kernel_size   = sh_tensor_stride + coordinate_size;
  size_type *sh_dilation      = sh_kernel_size + coordinate_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  auto const equal = out_map.get_key_equal();

  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
  }
  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  coordinate<coordinate_type> curr_coordinate(sh_tmp);
  auto const unused_key = out_map.get_unused_key();
  if (x < num_threads) {
    // iterate over values
    auto kernel_map_index =
        (x < 1) ? 0 : inclusive_count_cumsum_per_thread[x - 1];
    typename map_type::value_type const &out_value =
        out_map.data()[out_valid_map_index[x]];
    if (!equal(out_value.first, unused_key)) {
      // set bounds for the valid keys
      for (uint32_t kernel_index = 0; kernel_index < volume; ++kernel_index) {
        sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
        auto const &in_result = in_map.find(curr_coordinate);
        if (in_result != in_map.end()) {
          // insert to
          p_kernels[kernel_map_index] = kernel_index;
          p_in_maps[kernel_map_index] = (*in_result).second;
          p_out_maps[kernel_map_index] = out_value.second;
          ++kernel_map_index;
        }
      }
    }
  }
}

// Looks up each valid output coordinate directly in the input map (used for
// kernel volume == 1); rows with no match record unused_key in p_in_maps so
// they can be compacted away afterwards.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
direct_in_out_map(size_type const num_threads,         //
                  map_type const __restrict__ in_map,  //
                  map_type const __restrict__ out_map, //
                  index_type const *const __restrict__
                      out_valid_map_offset,           //
                  index_type *__restrict__ p_in_maps, //
                  index_type *__restrict__ p_out_maps,
                  index_type const unused_key) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    typename map_type::value_type const &out_value =
        out_map.data()[out_valid_map_offset[x]];
    auto const &result = in_map.find(out_value.first);
    if (result != in_map.end()) {
      p_in_maps[x] = (*result).second;
      p_out_maps[x] = out_value.second;
    } else {
      p_in_maps[x] = unused_key;
    }
  }
}

// One thread per (output row, kernel offset) pair: thread x handles kernel
// offset x % volume of output row x / volume.  Misses record
// unused_map_value in p_kernels for later compaction with remove_if.
// Same dynamic shared-memory layout as count_kernel.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
direct_kernel_map(map_type const __restrict__ in_map,  //
                  map_type const __restrict__ out_map, //
                  index_type const *const __restrict__
                      out_valid_map_index,                   //
                  size_type const num_threads,               //
                  gpu_kernel_region<coordinate_type> kernel, //
                  index_type *__restrict__ p_kernels,        //
                  index_type *__restrict__ p_in_maps,        //
                  index_type *__restrict__ p_out_maps,
                  index_type const unused_map_value) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_tensor_stride = sh_size;
  size_type *sh_kernel_size   = sh_tensor_stride + coordinate_size;
  size_type *sh_dilation      = sh_kernel_size + coordinate_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  auto const equal = out_map.get_key_equal();

  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
  }
  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  auto const unused_key = out_map.get_unused_key();
  if (x < num_threads) {
    // iterate over values
    index_type kernel_index = x % volume;
    typename map_type::value_type const &out_value =
        out_map.data()[out_valid_map_index[x / volume]];
    if (!equal(out_value.first, unused_key)) {
      // set bounds for the valid keys
      // TODO: copy the curr_coordinate to sh_curr_coordinate
      sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
      auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
      if (in_result != in_map.end()) {
        // insert to
        p_kernels[x] = kernel_index;
        p_in_maps[x] = (*in_result).second;
        p_out_maps[x] = out_value.second;
      } else {
        p_kernels[x] = unused_map_value;
      }
    }
  }
}

} // namespace detail

// Builds the in→out kernel map between this (input) map and out_map for the
// given kernel region.  Three strategies: a direct lookup for volume-1
// kernels, a two-pass count+fill path (MEMORY_EFFICIENT) and a one-pass
// overallocate-then-compact path (SPEED_OPTIMIZED).
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map(
    self_type const &out_map, gpu_kernel_region<coordinate_type> const &kernel,
    CUDAKernelMapMode::Mode kernel_map_mode, uint32_t thread_dim) const {
  // Over estimate the reserve size to be size();
  size_type const out_size = out_map.size();
  size_type const kernel_volume = kernel.volume();
  ASSERT(kernel_volume > 0, "Invalid kernel");

  if (kernel_volume == 1) {
    // directly iterate over all output first by finding all in out map.
    auto const N = out_size;
    LOG_DEBUG("out_map size:", N);
    index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
        2 * (N + 1) * sizeof(index_type));
    index_type *ins = in_out_map;
    index_type *outs =
        in_out_map + N + 1; // for __restrict__ collision prevention
    index_type unused_key = std::numeric_limits<index_type>::max();
    hipLaunchKernelGGL(
        (detail::direct_in_out_map<coordinate_type, size_type, index_type,
                                   map_type>),
        dim3(GET_BLOCKS(N, thread_dim)), dim3(thread_dim), 0, 0, N,
        *m_map,                            //
        *(out_map.m_map),                  //
        out_map.m_valid_map_index.cdata(), //
        ins,                               // in map
        outs,                              // out map
        unused_key);
    LOG_DEBUG("Direct in out map copy done");
    auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
    auto const valid_size =
        thrust::remove_if(
            thrust::device, begin,
            thrust::make_zip_iterator(thrust::make_tuple(ins + N, outs + N)),
            detail::is_first<index_type>(unused_key)) -
        begin;
    LOG_DEBUG("Valid size:", valid_size);
    kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
    CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), ins,
                         valid_size * sizeof(index_type),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), outs,
                         valid_size * sizeof(index_type),
                         hipMemcpyDeviceToDevice));
    base_type::m_byte_allocator.deallocate((char *)in_out_map,
                                           2 * (N + 1) * sizeof(index_type));
    LOG_DEBUG("Cleaning up");
    return kernel_map;
  } else if (kernel_map_mode == CUDAKernelMapMode::MEMORY_EFFICIENT &&
             kernel.region_type() != RegionType::CUSTOM) {
    // (THREAD * D + 3 * D) * 4
    uint32_t const shared_memory_size_in_bytes =
        3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
        thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
    // clang-format on
    size_type const num_threads = out_size;
    auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
    LOG_DEBUG("num block", num_blocks);
    LOG_DEBUG("out_map size", out_map.size());
    LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
    LOG_DEBUG("threads dim", thread_dim);
    LOG_DEBUG("num threads", num_threads);

    index_type *d_p_count_per_thread = reinterpret_cast<index_type *>(
        base_type::m_byte_allocator.allocate(num_threads *
                                             sizeof(index_type)));

    // Initialize count per thread
    hipLaunchKernelGGL(
        (detail::count_kernel<coordinate_type, size_type, index_type,
                              map_type>),
        dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
        *m_map,                             //
        *out_map.m_map,                     //
        out_map.m_valid_map_index.cbegin(), //
        num_threads,                        //
        kernel,                             //
        d_p_count_per_thread);
    CUDA_CHECK(hipStreamSynchronize(0));
    LOG_DEBUG("count_kernel finished");

    thrust::inclusive_scan(thrust::device, d_p_count_per_thread,
                           d_p_count_per_thread + num_threads,
                           d_p_count_per_thread);

    index_type num_kernel_map; // type following the kernel map allocator
    CUDA_CHECK(hipMemcpy(&num_kernel_map,
                         d_p_count_per_thread + num_threads - 1,
                         sizeof(index_type), hipMemcpyDeviceToHost));

    // set kernel map
    LOG_DEBUG("Found", num_kernel_map, "kernel map elements.");
    kernel_map_type kernel_map(num_kernel_map, base_type::m_byte_allocator);
    CUDA_CHECK(hipStreamSynchronize(0));
    LOG_DEBUG("Allocated kernel_map.");

    hipLaunchKernelGGL(
        (detail::preallocated_kernel_map_iteration<coordinate_type, size_type,
                                                   index_type, map_type>),
        dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
        *m_map,                             //
        *out_map.m_map,                     //
        out_map.m_valid_map_index.cbegin(), //
        num_threads,                        //
        kernel,                             //
        d_p_count_per_thread,               //
        kernel_map.kernels.begin(),         //
        kernel_map.in_maps.begin(),         //
        kernel_map.out_maps.begin());
    CUDA_CHECK(hipStreamSynchronize(0));
    LOG_DEBUG("Preallocated kernel map done");

    kernel_map.decompose();
    base_type::m_byte_allocator.deallocate(
        reinterpret_cast<char *>(d_p_count_per_thread),
        num_threads * sizeof(index_type));
    LOG_DEBUG("hipFree");
    return kernel_map;
  } else if (kernel_map_mode == CUDAKernelMapMode::SPEED_OPTIMIZED &&
             kernel.region_type() != RegionType::CUSTOM) {
    // (THREAD * 3 * D + 3 * D) * 4
    uint32_t const shared_memory_size_in_bytes =
        3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
        (thread_dim + (thread_dim + kernel_volume - 1) / kernel_volume) *
            m_coordinate_size *
            sizeof(coordinate_type); // tmp coordinate + current coordinate
    size_type const num_threads = out_size * kernel_volume;
    auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
    LOG_DEBUG("num block", num_blocks);
    LOG_DEBUG("out_map size", out_map.size());
    LOG_DEBUG("kernel_volume", kernel_volume);
    LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
    LOG_DEBUG("threads dim", thread_dim);
    LOG_DEBUG("num threads", num_threads);

    index_type unused_map_value = std::numeric_limits<index_type>::max();

    // One allocation split into three arrays; +1 gaps keep the __restrict__
    // pointers from aliasing.
    index_type *d_p_valid_in_index =
        reinterpret_cast<index_type *>(base_type::m_byte_allocator.allocate(
            3 * (num_threads + 1) * sizeof(index_type)));
    index_type *d_p_valid_out_index = d_p_valid_in_index + num_threads + 1;
    index_type *d_p_valid_kernel_index = d_p_valid_out_index + num_threads + 1;

    // Initialize count per thread
    hipLaunchKernelGGL(
        (detail::direct_kernel_map<coordinate_type, size_type, index_type,
                                   map_type>),
        dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
        *m_map,                             //
        *out_map.m_map,                     //
        out_map.m_valid_map_index.cbegin(), //
        num_threads,                        //
        kernel,                             //
        d_p_valid_kernel_index,             //
        d_p_valid_in_index,                 //
        d_p_valid_out_index,                //
        unused_map_value);
    CUDA_CHECK(hipStreamSynchronize(0));
    LOG_DEBUG("direct_kernel_map finished");

    auto begin = thrust::make_zip_iterator(thrust::make_tuple(
        d_p_valid_kernel_index, d_p_valid_in_index, d_p_valid_out_index));
    auto const valid_size =
        thrust::remove_if(thrust::device, begin,
                          thrust::make_zip_iterator(thrust::make_tuple(
                              d_p_valid_kernel_index + num_threads,
                              d_p_valid_in_index + num_threads,
                              d_p_valid_out_index + num_threads)),
                          detail::is_first<index_type>(unused_map_value)) -
        begin;
    LOG_DEBUG("Valid size:", valid_size);

    kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator);
    CUDA_CHECK(hipMemcpy(kernel_map.kernels.data(), d_p_valid_kernel_index,
                         valid_size * sizeof(index_type),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), d_p_valid_in_index,
                         valid_size * sizeof(index_type),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), d_p_valid_out_index,
                         valid_size * sizeof(index_type),
                         hipMemcpyDeviceToDevice));

    kernel_map.decompose();
    base_type::m_byte_allocator.deallocate(
        reinterpret_cast<char *>(d_p_valid_in_index),
        3 * (num_threads + 1) * sizeof(index_type));
    LOG_DEBUG("hipFree");
    return kernel_map;
  } else { // kernel volume == 1
    ASSERT(false, "Not implemented");
  }
}

namespace detail {

// Maps each valid input row to the output row obtained by flooring its
// coordinate to the given tensor stride; misses record unused_key.
// Dynamic shared layout: stride (coordinate_size values), then one scratch
// coordinate per thread.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
stride_map_kernel(map_type const __restrict__ in_map,  //
                  map_type const __restrict__ out_map, //
                  index_type const *const __restrict__
                      in_valid_map_index,                      //
                  size_type const num_threads,                 //
                  index_type const *const __restrict__ stride, //
                  index_type *__restrict__ p_in_maps,          //
                  index_type *__restrict__ p_out_maps,
                  size_type const coordinate_size,
                  index_type const unused_key) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_stride = sh_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_size + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_stride[i] = stride[i];
  }

  __syncthreads();

  if (x >= num_threads)
    return;

  typename map_type::value_type const &in_value =
      in_map.data()[in_valid_map_index[x]];

  // coordinate 0 is the batch index and is never strided.
  sh_tmp[0] = in_value.first[0];
  for (index_type j = 1; j < coordinate_size; ++j) {
    // floor-divide by the stride via round-down float ops, then rescale.
    sh_tmp[j] =
        (__float2int_rd(__fdiv_rd(in_value.first[j], sh_stride[j - 1]))) *
        sh_stride[j - 1];
  }

  auto out_iter =
      out_map.find(coordinate<coordinate_type>(sh_tmp));

  if (out_iter == out_map.end()) {
    p_in_maps[x] = unused_key;
  } else {
    p_in_maps[x] = in_value.second;
    p_out_maps[x] = out_iter->second;
  }
}

} // namespace detail

// Builds the in→out map between this map and out_map, where out_map holds
// this map's coordinates floored to out_tensor_stride.
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_map(
    self_type const &out_map, stride_type const &out_tensor_stride,
    uint32_t thread_dim) const {
  // Over estimate the reserve size to be size();
  size_type const in_size = size();
  index_storage_type d_out_tensor_stride(out_tensor_stride);

  index_type unused_key = std::numeric_limits<index_type>::max();
  // (THREAD * D + D) * 4
  uint32_t const shared_memory_size_in_bytes =
      m_coordinate_size * sizeof(index_type) +                  // stride
      thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
  size_type const num_threads = in_size;
  auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
  LOG_DEBUG("num block", num_blocks);
  LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
  LOG_DEBUG("threads dim", thread_dim);
  LOG_DEBUG("num threads", num_threads);

  index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
      2 * (in_size + 1) * sizeof(index_type));
  index_type *ins = in_out_map;
  index_type *outs =
      in_out_map + in_size + 1; // for __restrict__ collision prevention

  LOG_DEBUG("Allocated temporary memory");
  hipLaunchKernelGGL(
      (detail::stride_map_kernel<coordinate_type, size_type, index_type,
                                 map_type>),
      dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
      *m_map,                       //
      *out_map.m_map,               //
      m_valid_map_index.cbegin(),   //
      num_threads,                  //
      d_out_tensor_stride.cbegin(), //
      ins,                          //
      outs,                         //
      m_coordinate_size,            //
      unused_key);

  auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
  auto const valid_size =
      thrust::remove_if(thrust::device, begin,
                        thrust::make_zip_iterator(
                            thrust::make_tuple(ins + in_size, outs + in_size)),
                        detail::is_first<index_type>(unused_key)) -
      begin;
  LOG_DEBUG("Valid size:", valid_size);
  kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
  CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), ins,
                       valid_size * sizeof(index_type),
                       hipMemcpyDeviceToDevice));
  CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), outs,
                       valid_size * sizeof(index_type),
                       hipMemcpyDeviceToDevice));

  base_type::m_byte_allocator.deallocate(
      (char *)in_out_map, 2 * (in_size + 1) * sizeof(index_type));

  return kernel_map;
}

namespace detail {

// Maps each valid input row to its batch origin: coordinate 0 (the batch
// index) is kept, all spatial coordinates are zeroed, and the result is
// looked up in origin_map.  The origin row index doubles as the kernel index
// so kernel_map.decompose() can group rows per batch.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
origin_map_kernel(map_type const __restrict__ in_map,     //
                  map_type const __restrict__ origin_map, //
                  index_type const *const __restrict__
                      in_valid_map_index,             //
                  size_type const num_threads,        //
                  index_type *__restrict__ p_in_maps, //
                  index_type *__restrict__ p_out_maps,
                  index_type *__restrict__ p_kernels,
                  size_type const coordinate_size) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  // clang-format off
  coordinate_type *sh_tmp = sh_all + tx * coordinate_size;
  // clang-format on

  if (x < num_threads)
    for (index_type i = 0; i < coordinate_size; ++i)
      sh_tmp[i] = 0;

  __syncthreads();

  if (x < num_threads) {
    typename map_type::value_type const &in_value =
        in_map.data()[in_valid_map_index[x]];

    sh_tmp[0] = in_value.first[0];
    // NOTE(review): the find() result is dereferenced unchecked — this
    // assumes origin_map contains every batch index present in in_map;
    // verify against the caller.
    auto origin_iter = origin_map.find(coordinate<coordinate_type>(sh_tmp));

    p_in_maps[x] = in_value.second;
    p_out_maps[x] = origin_iter->second; // origin_map row index
    // For kernel_map decompose()
    p_kernels[x] = origin_iter->second;
  }
}

} // namespace detail

// Builds the map from every row of this map to its batch-origin row in
// origin_map (origin_map must have all tensor strides equal to 0).
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin_map(
    self_type const &origin_map,
    uint32_t thread_dim) const {
  ASSERT(std::all_of(origin_map.get_tensor_stride().begin(),
                     origin_map.get_tensor_stride().end(),
                     [](auto const &i) { return i == 0; }),
         "Invalid origin tensor stride", origin_map.get_tensor_stride());

  // reserve size();
  size_type const in_size = size();
  LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size());
  // (THREAD * D) * 4
  uint32_t const shared_memory_size_in_bytes =
      thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
  size_type const num_threads = in_size;
  auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
  LOG_DEBUG("origin_map num block", num_blocks);
  LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes);
  LOG_DEBUG("origin_map threads dim", thread_dim);
  LOG_DEBUG("origin_map num threads", num_threads);

  kernel_map_type kernel_map(in_size, base_type::m_byte_allocator);
  CUDA_CHECK(hipStreamSynchronize(0));
  LOG_DEBUG("Allocated kernel_map.");

  hipLaunchKernelGGL(
      (detail::origin_map_kernel<coordinate_type, size_type, index_type,
                                 map_type>),
      dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
      *m_map,                      //
      *origin_map.m_map,           //
      m_valid_map_index.cbegin(),  //
      num_threads,                 //
      kernel_map.in_maps.begin(),  //
      kernel_map.out_maps.begin(), //
      kernel_map.kernels.begin(),  //
      m_coordinate_size);

  CUDA_CHECK(hipStreamSynchronize(0));
  kernel_map.decompose();
  LOG_DEBUG("origin map decomposed");

  return kernel_map;
}

namespace detail {

// For each continuous field point (tfield) and each of its neighbor_volume
// axis-aligned grid neighbors, finds the neighbor in in_map and computes a
// multilinear interpolation weight.  One thread per (point, neighbor) pair;
// bit j-1 of the neighbor index selects the lower/upper cell corner on
// coordinate axis j.
template <typename coordinate_type, typename index_type, //
          typename stride_type,                          //
          typename float_type,                           //
          typename map_type>
__global__ void
interpolation_kernel(map_type __restrict__ in_map,            //
                     index_type const num_threads,            //
                     float_type const *__restrict__ p_tfield, //
                     index_type *__restrict__ p_in_maps,      //
                     index_type *__restrict__ p_out_maps,     //
                     float_type *__restrict__ p_weights,      //
                     stride_type const *__restrict__ p_tensor_stride, //
                     index_type const unused_map_value,
                     index_type const coordinate_size,
                     index_type const neighbor_volume) {
  // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
  // + THREADS * coordinate_size * sizeof(coordinate_type)
  SharedMemory<float_type> shared;
  float_type *sh_all = shared.getPointer();

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  float_type *sh_tfield = sh_all + tx * coordinate_size;
  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(
      sh_all + CUDA_NUM_THREADS * coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
      sh_coordinate + CUDA_NUM_THREADS * coordinate_size);

  auto const equal = in_map.get_key_equal();

  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = p_tensor_stride[i];
  }

  if (x < num_threads) {
    index_type const offset = coordinate_size * (x / neighbor_volume);
    for (index_type i = 0; i < coordinate_size; ++i) {
      sh_tfield[i] = p_tfield[offset + i];
    }
  }

  __syncthreads();

  if (x < num_threads) {
    // iterate over values
    uint32_t neighbor_ind = x % neighbor_volume;

    // batch index
    sh_tmp[0] = lrint(sh_tfield[0]);
    uint32_t mask = 1;
    for (uint32_t j = coordinate_size - 1; j > 0; --j) {
      index_type curr_tensor_stride = sh_tensor_stride[j - 1];
      if ((neighbor_ind & mask) == 0)
        sh_tmp[j] =
            floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
      else
        sh_tmp[j] =
            floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride +
            curr_tensor_stride;
      mask = mask << 1;
    }

    auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
    if (in_result != in_map.end()) {
      p_in_maps[x] = (*in_result).second;
      p_out_maps[x] = x / neighbor_volume;
      // Compute weight
      float_type weight = 1;
      for (uint32_t j = 1; j < coordinate_size; ++j) {
        weight *= 1 - abs(sh_tfield[j] - sh_tmp[j]) / sh_tensor_stride[j - 1];
      }
      p_weights[x] = weight;
    } else {
      p_in_maps[x] = unused_map_value;
    }
  }
}

// Maps each continuous field point to the grid cell that contains it (floor
// to tensor stride) and looks the cell up in in_map; misses record
// unused_map_value in p_in_maps.  One thread per field point.
template <typename coordinate_type, typename index_type, //
          typename stride_type,                          //
          typename float_type,                           //
          typename map_type>
__global__ void
field_map_kernel(map_type __restrict__ in_map,            //
                 index_type const num_threads,            //
                 float_type const *__restrict__ p_tfield, //
                 index_type *__restrict__ p_in_maps,      //
                 index_type *__restrict__ p_out_maps,     //
                 stride_type const *__restrict__ p_tensor_stride, //
                 index_type const unused_map_value,
                 index_type const coordinate_size) {
  // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
  // + THREADS * coordinate_size * sizeof(coordinate_type)
  SharedMemory<float_type> shared;
  float_type *sh_all = shared.getPointer();

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_all);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
      sh_coordinate + CUDA_NUM_THREADS * coordinate_size);

  auto const equal = in_map.get_key_equal();

  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = p_tensor_stride[i];
  }

  __syncthreads();

  index_type const offset = coordinate_size * x;
  if (x < num_threads) {
    // iterate over values
    float_type const *curr_tfield = p_tfield + offset;

    // batch index
    sh_tmp[0] = lrint(curr_tfield[0]);
    for (uint32_t j = coordinate_size - 1; j > 0; --j) {
      index_type curr_tensor_stride = sh_tensor_stride[j - 1];
      sh_tmp[j] =
          floor(curr_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
    }

    auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
    if (in_result != in_map.end()) {
      p_in_maps[x] = (*in_result).second;
      p_out_maps[x] = x;
    } else {
      p_in_maps[x] = unused_map_value;
    }
  }
}

// interpolation map inst
template <typename coordinate_type, typename index_type, typename size_type,
          typename stride_type, typename field_type, typename map_type,
          typename ByteAllocatorType>
std::vector<at::Tensor> interpolation_map_weight_tfield_type(
    uint32_t const
num_tfield, // uint32_t const coordinate_size, // index_type const unused_key, // field_type const *const p_tfield, // map_type &map, // stride_type const *const p_tensor_stride, // ByteAllocatorType const &byte_allocator, c10::TensorOptions tfield_options) { uint32_t const neighbor_volume = ::pow(2, (coordinate_size - 1)); size_type num_threads = neighbor_volume * num_tfield; LOG_DEBUG("neighbor_volume:", neighbor_volume, "num_tfield:", num_tfield, "num_threads:", num_threads); index_type *d_in_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); index_type *d_out_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); field_type *d_weight = reinterpret_cast<field_type *>( byte_allocator.allocate(num_threads * sizeof(field_type))); size_type shared_memory_size_in_bytes = coordinate_size * CUDA_NUM_THREADS * sizeof(field_type) + coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) + coordinate_size * sizeof(index_type); LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes); hipLaunchKernelGGL(( interpolation_kernel<coordinate_type, index_type, stride_type, field_type, map_type>) , dim3(GET_BLOCKS(num_threads, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), shared_memory_size_in_bytes, 0, map, // num_threads, // p_tfield, // d_in_map, // d_out_map, // d_weight, // p_tensor_stride, // unused_key, // coordinate_size, // neighbor_volume); // remove unused_keys auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(d_in_map, // d_out_map, d_weight)); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator(thrust::make_tuple( d_in_map + num_threads, // d_out_map + num_threads, d_weight + num_threads)), detail::is_first<index_type>(unused_key)) - valid_begin; LOG_DEBUG("number_of_valid:", number_of_valid); auto final_in_map = torch::empty({number_of_valid}, 
tfield_options.dtype(torch::kInt32).requires_grad(false)); auto final_out_map = torch::empty({number_of_valid}, tfield_options.dtype(torch::kInt32).requires_grad(false)); auto final_weights = torch::empty({number_of_valid}, tfield_options.requires_grad(false)); if (number_of_valid > 0) { CUDA_CHECK(hipMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map, number_of_valid * sizeof(int32_t), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map, number_of_valid * sizeof(int32_t), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(final_weights.template data_ptr<field_type>(), d_weight, number_of_valid * sizeof(field_type), hipMemcpyHostToDevice)); } byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type)); byte_allocator.deallocate((char *)d_out_map, num_threads * sizeof(index_type)); byte_allocator.deallocate((char *)d_weight, num_threads * sizeof(field_type)); return {final_in_map, final_out_map, final_weights}; } // interpolation map inst template <typename coordinate_type, typename index_type, typename size_type, typename stride_type, typename field_type, typename map_type, typename ByteAllocatorType> std::pair<at::Tensor, at::Tensor> field_map_type(uint32_t const num_tfield, // uint32_t const coordinate_size, // index_type const unused_key, // field_type const *const p_tfield, // map_type &map, // stride_type const *const p_tensor_stride, // ByteAllocatorType const &byte_allocator) { size_type num_threads = num_tfield; LOG_DEBUG("num_threads:", num_threads); index_type *d_in_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); index_type *d_out_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); size_type shared_memory_size_in_bytes = coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) + coordinate_size * sizeof(index_type); LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes); 
hipLaunchKernelGGL(( field_map_kernel<coordinate_type, index_type, stride_type, field_type, map_type>) , dim3(GET_BLOCKS(num_threads, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), shared_memory_size_in_bytes, 0, map, // num_threads, // p_tfield, // d_in_map, // d_out_map, // p_tensor_stride, // unused_key, // coordinate_size); // remove unused_keys auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(d_in_map, d_out_map)); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator( thrust::make_tuple(d_in_map + num_threads, // d_out_map + num_threads)), detail::is_first<index_type>(unused_key)) - valid_begin; LOG_DEBUG("number_of_valid:", number_of_valid); auto curr_device = at::hip::current_device(); auto tfield_options = torch::TensorOptions({at::kCUDA, curr_device}) .dtype(torch::kInt32) .requires_grad(false); auto final_in_map = torch::empty({number_of_valid}, tfield_options); auto final_out_map = torch::empty({number_of_valid}, tfield_options); if (number_of_valid > 0) { CUDA_CHECK(hipMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map, number_of_valid * sizeof(int32_t), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map, number_of_valid * sizeof(int32_t), hipMemcpyHostToDevice)); } byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type)); byte_allocator.deallocate((char *)d_out_map, num_threads * sizeof(index_type)); return {final_in_map, final_out_map}; } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> std::vector<at::Tensor> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::interpolation_map_weight( at::Tensor const &tfield) const { // Over estimate the reserve size to be size(); ASSERT(tfield.dim() == 2, "Invalid tfield dimension"); ASSERT(tfield.size(1) == m_coordinate_size, "Invalid tfield size"); size_type const num_tfield = tfield.size(0); uint32_t 
const neighbor_volume = ::pow(2, (m_coordinate_size - 1)); index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("map size", m_size); switch (tfield.scalar_type()) { case at::ScalarType::Double: return detail::interpolation_map_weight_tfield_type< coordinate_type, index_type, size_type, index_type, double, map_type, TemplatedAllocator<char>>(num_tfield, // m_coordinate_size, // unused_key, // tfield.template data_ptr<double>(), // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator, // tfield.options()); case at::ScalarType::Float: return detail::interpolation_map_weight_tfield_type< coordinate_type, index_type, size_type, index_type, float, map_type, TemplatedAllocator<char>>(num_tfield, // m_coordinate_size, // unused_key, // tfield.template data_ptr<float>(), // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator, // tfield.options()); default: ASSERT(false, "Unsupported float type"); } } template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <typename coordinate_field_type> std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::field_map( coordinate_field_type const *p_tfield, size_type const num_tfield) const { index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("map size", m_size); return detail::field_map_type<coordinate_type, index_type, size_type, index_type, coordinate_field_type, map_type, TemplatedAllocator<char>>( num_tfield, // m_coordinate_size, // unused_key, // p_tfield, // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator); } /** * Union map */ namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename tensor_type, // typename map_type> __global__ void union_map_kernel(size_type const num_threads, // map_type const __restrict__ in_map, // map_type const __restrict__ union_map, // index_type const *const __restrict__ 
in_valid_map_index, // tensor_type *__restrict__ p_in_maps, // tensor_type *__restrict__ p_union_maps, size_type const coordinate_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { typename map_type::value_type const &in_value = in_map.data()[in_valid_map_index[x]]; auto union_iter = union_map.find(in_value.first); p_in_maps[x] = in_value.second; p_union_maps[x] = union_iter->second; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> std::vector<at::Tensor> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::union_map( std::vector<std::reference_wrapper<self_type>> const &in_maps, uint32_t thread_dim) const { auto options = torch::TensorOptions({at::kCUDA, at::hip::current_device()}) .dtype(torch::kInt64) .requires_grad(false); std::vector<at::Tensor> union_maps; for (self_type const &in_map : in_maps) { size_type const num_threads = in_map.m_valid_map_index.size(); auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); at::Tensor curr_map = torch::empty({2, num_threads}, options); LOG_DEBUG("in_map size", num_threads, ", num block", num_blocks, ", threads dim", thread_dim); int64_t *d_in_map = curr_map.template data_ptr<int64_t>(); hipLaunchKernelGGL(( detail::union_map_kernel<coordinate_type, size_type, index_type, int64_t, map_type>) , dim3(num_blocks), dim3(thread_dim), 0, 0, num_threads, // *in_map.m_map, // *m_map, // in_map.m_valid_map_index.cbegin(), // d_in_map, // d_in_map + num_threads, // m_coordinate_size); CUDA_CHECK(hipStreamSynchronize(0)); union_maps.push_back(std::move(curr_map)); } return union_maps; } // Helper functions template <typename coordinate_type, template <typename T> class TemplatedAllocator> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::copy_coordinates( coordinate_type *dst_coordinate) const { size_type const num_threads = size(); if (num_threads <= 0) return; // Copy by offset 
// size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); // detail::copy_coordinates_by_offset<coordinate_type, size_type, index_type, // map_type> // <<<num_blocks, CUDA_NUM_THREADS>>>( // *m_map, // // dst_coordinate, // // m_valid_map_index.data(), // // num_threads, // // m_coordinate_size); size_type const num_blocks = GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS); hipLaunchKernelGGL(( detail::copy_coordinates_by_valid_row<coordinate_type, size_type, index_type, map_type>) , dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, // *m_map, // const_coordinate_data(), // dst_coordinate, // m_valid_row_index.cbegin(), // num_threads * m_coordinate_size, // m_coordinate_size); } // Template instantiation template class CoordinateFieldMapGPU<default_types::ccoordinate_type, default_types::dcoordinate_type, detail::default_allocator>; template class CoordinateFieldMapGPU<default_types::ccoordinate_type, default_types::dcoordinate_type, detail::c10_allocator>; template class CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>; template class CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>; template std::pair< gpu_storage<default_types::index_type, detail::default_allocator<char>>, gpu_storage<default_types::index_type, detail::default_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: insert_and_map<true>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair< gpu_storage<default_types::index_type, detail::default_allocator<char>>, gpu_storage<default_types::index_type, detail::default_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: insert_and_map<false>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair< 
gpu_storage<default_types::index_type, detail::c10_allocator<char>>, gpu_storage<default_types::index_type, detail::c10_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: insert_and_map<true>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair< gpu_storage<default_types::index_type, detail::c10_allocator<char>>, gpu_storage<default_types::index_type, detail::c10_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: insert_and_map<false>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: field_map<float>(float const *p_tfield, default_types::size_type const num_tfield) const; template std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: field_map<float>(float const *p_tfield, default_types::size_type const num_tfield) const; } // namespace minkowski
4b1ee12c1c8a814910cfeb6a52670dc5816891da.cu
/* * Copyright (c) 2020 NVIDIA CORPORATION. * Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. 
*/ #include "coordinate_map_functors.cuh" #include "coordinate_map_gpu.cuh" #include "gpu.cuh" #include "kernel_map.cuh" #include "kernel_map.hpp" #include "sharedmem.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> namespace minkowski { namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void remap_inverse_map(map_type __restrict__ map, // coordinate_type const *__restrict__ coordinates, // index_type *__restrict__ inverse_map, // size_type const num_threads, // size_type const coordinate_size // ) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { auto result = map.find( coordinate<coordinate_type>{&coordinates[x * coordinate_size]}); inverse_map[x] = result->second; } } template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void insert_and_map_kernel(map_type __restrict__ map, // coordinate_type const *__restrict__ coordinates, // index_type *__restrict__ valid_map_index, // index_type *__restrict__ valid_row_index, // size_type const num_threads, // size_type const coordinate_size, // index_type const unused_key) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { // Returns pair<iterator, (bool)insert_success> auto const result = map.insert(thrust::make_pair( coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x)); // auto test = &coordinates[x * coordinate_size]; if (result.second) { valid_row_index[x] = x; // success map index. remove failed insertion with success. 
valid_map_index[x] = result.first.offset(); } else { valid_map_index[x] = unused_key; } } } } // namespace detail /* * Field Map */ namespace detail { template <typename coordinate_field_type, typename coordinate_int_type, typename index_type, bool stride_one> __global__ void quantize_coordinates_kernel( coordinate_field_type const *__restrict__ p_tfield, // coordinate_int_type *__restrict__ p_stensor, // index_type const *__restrict__ p_tensor_stride, // index_type const num_threads, index_type const coordinate_size) { // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type) // + THREADS * coordinate_size * sizeof(coordinate_type) extern __shared__ index_type sh_tensor_stride[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (stride_one) { if (x < num_threads) { if (x % coordinate_size == 0) p_stensor[x] = lrint(p_tfield[x]); else p_stensor[x] = floor(p_tfield[x]); } } else { for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = p_tensor_stride[i]; } __syncthreads(); if (x < num_threads) { // batch index if (x % coordinate_size == 0) p_stensor[x] = lrint(p_tfield[x]); else { index_type curr_tensor_stride = sh_tensor_stride[((x - 1) % coordinate_size)]; p_stensor[x] = floor(p_tfield[x] / curr_tensor_stride) * curr_tensor_stride; } } } } } // namespace detail template <typename coordinate_field_type, typename coordinate_int_type, template <typename T> class TemplatedAllocator> void CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>:: quantize_coordinates(coordinate_int_type *d_dst_coordinates, stride_type const &tensor_stride) const { int64_t const stride_prod = std::accumulate( tensor_stride.begin(), tensor_stride.end(), 1, std::multiplies<>()); // Copy tensor_stride to device index_type *d_tensor_stride = reinterpret_cast<index_type *>( m_byte_allocator.allocate(m_coordinate_size * sizeof(index_type))); 
CUDA_CHECK(cudaMemcpy( d_tensor_stride, // dst tensor_stride.data(), // first element of the dereferenced iter. sizeof(index_type) * m_coordinate_size, // bytes cudaMemcpyHostToDevice)); size_type const num_threads = size() * m_coordinate_size; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); if (stride_prod == 1) { detail::quantize_coordinates_kernel<coordinate_field_type, coordinate_int_type, index_type, true> <<<num_blocks, CUDA_NUM_THREADS, m_coordinate_size * sizeof(index_type)>>>( const_coordinate_data(), d_dst_coordinates, d_tensor_stride, num_threads, m_coordinate_size); } else { detail::quantize_coordinates_kernel<coordinate_field_type, coordinate_int_type, index_type, false> <<<num_blocks, CUDA_NUM_THREADS, m_coordinate_size * sizeof(index_type)>>>( const_coordinate_data(), d_dst_coordinates, d_tensor_stride, num_threads, m_coordinate_size); } } /* * @brief Given a key iterator begin-end pair and a value iterator begin-end * pair, insert all elements. * * @note The key and value iterators can be 1) pointers, 2) coordinate or vector * iterators. * * @return none */ template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <bool remap> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) { size_type const N = key_last - key_first; LOG_DEBUG("key iterator length", N); if (N == 0) { m_size = 0; return; } m_valid_row_index.allocate(N); m_valid_map_index.allocate(N); // Copy the coordinates to m_coordinate base_type::reserve(N); CUDA_CHECK( cudaMemcpy(coordinate_data(), // dst key_first->data(), // first element of the dereferenced iter. 
sizeof(coordinate_type) * N * m_coordinate_size, // bytes cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaDeviceSynchronize()); LOG_DEBUG("Reserved and copiedm", N, "x", m_coordinate_size, "coordinates"); // compute cuda kernel call params size_type const num_threads = N; LOG_DEBUG("nm_threads", num_threads); size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); LOG_DEBUG("nm_blocks", num_blocks); index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("unused_key", unused_key); detail::insert_and_map_kernel<coordinate_type, size_type, index_type, map_type><<<num_blocks, CUDA_NUM_THREADS>>>( *m_map, // const_coordinate_data(), // m_valid_map_index.data(), // m_valid_row_index.data(), // num_threads, m_coordinate_size, unused_key); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("Map size:", m_map->size()); // Valid row index auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(m_valid_map_index.begin(), m_valid_row_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, valid_begin, thrust::make_zip_iterator(thrust::make_tuple( m_valid_map_index.end(), m_valid_row_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; m_valid_row_index.resize(number_of_valid); m_valid_map_index.resize(number_of_valid); m_size = number_of_valid; LOG_DEBUG("Number of successful insertion", m_size); if (remap // When remapping && number_of_valid != N // when the # of inserted items differ from the # // of successful insertions ) { m_inverse_row_index.allocate(N); thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(count_begin, count_begin + number_of_valid, detail::update_value_with_offset<index_type, map_type>{ *m_map, m_valid_map_index.data()}); size_type const num_threads = N; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); detail::remap_inverse_map<coordinate_type, size_type, index_type, map_type> <<<num_blocks, CUDA_NUM_THREADS>>>(*m_map, // 
const_coordinate_data(), // m_inverse_row_index.data(), // num_threads, m_coordinate_size); LOG_DEBUG("Remapping finished"); } } // namespace minkowski template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <bool remap> std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>, gpu_storage<default_types::index_type, TemplatedAllocator<char>>> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert_and_map( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) { LOG_DEBUG("insert_and_map"); insert<remap>(key_first, key_last); return std::make_pair(m_valid_row_index, m_inverse_row_index); } template <typename coordinate_type, template <typename T> class TemplatedAllocator> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>:: initialize_valid_indices(size_t const N_unique) { m_valid_row_index.resize(N_unique); m_valid_map_index.resize(N_unique); m_size = N_unique; // Insert coordinates auto insert = detail::insert_coordinate<coordinate_type, map_type, index_type *>{ *m_map, // map const_coordinate_data(), // coordinates, m_valid_row_index.data(), // valid row m_valid_map_index.data(), // iter offset m_coordinate_size}; thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert); } /* * @brief given a key iterator begin-end pair find all valid keys and its * index. * * @return a pair of (valid index, query value) vectors. 
*/ template <typename coordinate_type, template <typename T> class TemplatedAllocator> std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>, gpu_storage<default_types::index_type, TemplatedAllocator<char>>> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::find( coordinate_iterator<coordinate_type> key_first, coordinate_iterator<coordinate_type> key_last) const { size_type N = key_last - key_first; LOG_DEBUG(N, "queries for find."); auto const find_functor = detail::find_coordinate<coordinate_type, map_type>( *m_map, key_first->data(), m_unused_element, m_coordinate_size); LOG_DEBUG("Find functor initialized."); auto const invalid_functor = detail::is_unused_pair<coordinate_type, mapped_type>(m_unused_element); LOG_DEBUG("Valid functor initialized."); thrust::counting_iterator<index_type> index{0}; gpu_storage<index_type, byte_allocator_type> input_index(N); gpu_storage<index_type, byte_allocator_type> results(N); LOG_DEBUG("Initialized functors."); thrust::sequence(thrust::device, input_index.begin(), input_index.end()); thrust::transform(thrust::device, index, index + N, results.begin(), find_functor); size_type const number_of_valid = thrust::remove_if(thrust::device, thrust::make_zip_iterator(thrust::make_tuple( input_index.begin(), results.begin())), thrust::make_zip_iterator(thrust::make_tuple( input_index.end(), results.end())), invalid_functor) - thrust::make_zip_iterator( thrust::make_tuple(input_index.begin(), results.begin())); LOG_DEBUG("Number of valid", number_of_valid); input_index.resize(number_of_valid); results.resize(number_of_valid); return std::make_pair(input_index, results); } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type> __global__ void stride_copy(coordinate_type const *__restrict__ src_coordinates, // index_type const *__restrict__ src_valid_row_index, // index_type const *__restrict__ stride, // coordinate_type *__restrict__ dst_coordinates, // size_type 
const num_threads, size_type const coordinate_size) { extern __shared__ size_type sh_stride[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) sh_stride[i] = stride[i]; __syncthreads(); if (x < num_threads) { const index_type src_start = src_valid_row_index[x] * coordinate_size; const index_type dst_start = x * coordinate_size; dst_coordinates[dst_start] = src_coordinates[src_start]; for (index_type j = 1; j < coordinate_size; ++j) { dst_coordinates[dst_start + j] = (__float2int_rd( __fdiv_rd(src_coordinates[src_start + j], sh_stride[j - 1]))) * sh_stride[j - 1]; // (__double2int_rd( // __ddiv_rn(src_coordinates[src_start + j], sh_stride[j - 1]))) * // sh_stride[j - 1]; } } } } // namespace detail /* * @brief given a key iterator begin-end pair find all valid keys and its * index. * * @return a pair of (valid index, query value) vectors. */ template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride( stride_type const &stride) const { // Over estimate the reserve size to be size(); size_type const N = size(); LOG_DEBUG("Strided map with kernel stride:", stride); self_type stride_map( N, m_coordinate_size, m_hashtable_occupancy, detail::stride_tensor_stride(base_type::m_tensor_stride, stride), m_map_allocator, base_type::m_byte_allocator); // stride coordinates size_type const num_threads = N; auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); detail::stride_copy<coordinate_type, size_type, index_type> <<<num_blocks, CUDA_NUM_THREADS, m_coordinate_size * sizeof(size_type)>>>( const_coordinate_data(), // m_valid_row_index.cbegin(), // m_device_tensor_stride.cbegin(), // stride_map.coordinate_data(), // num_threads, m_coordinate_size); LOG_DEBUG("Stride copy done."); auto &stride_valid_row_index = 
stride_map.m_valid_row_index; auto &stride_valid_map_index = stride_map.m_valid_map_index; stride_valid_row_index.resize(N); // row indices stride_valid_map_index.resize(N); // map offset // Insert coordinates index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("unused_key", unused_key); detail::insert_and_map_kernel<coordinate_type, size_type, index_type, map_type><<<num_blocks, CUDA_NUM_THREADS>>>( *stride_map.m_map, // stride_map.const_coordinate_data(), // stride_valid_map_index.data(), // stride_valid_row_index.data(), // num_threads, m_coordinate_size, unused_key); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("Stride map size:", m_map->size()); // Valid row index auto valid_begin = thrust::make_zip_iterator( thrust::make_tuple(stride_valid_map_index.begin(), // stride_valid_row_index.begin())); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator( thrust::make_tuple(stride_valid_map_index.end(), // stride_valid_row_index.end())), detail::is_first<index_type>(unused_key)) - valid_begin; stride_valid_row_index.resize(number_of_valid); stride_valid_map_index.resize(number_of_valid); stride_map.m_size = number_of_valid; LOG_DEBUG("Reduced to", number_of_valid); // remap values thrust::counting_iterator<uint32_t> count_begin{0}; thrust::for_each(count_begin, count_begin + number_of_valid, detail::update_value_with_offset<index_type, map_type>{ *stride_map.m_map, stride_map.m_valid_map_index.data()}); LOG_DEBUG("Stride remap done"); return stride_map; } namespace detail { template <typename coordinate_type, typename index_type> __device__ bool is_coordinate_aligned(coordinate_type *point, index_type *out_tensor_stride, uint32_t const size) { for (uint32_t i = 0; i < size - 1; ++i) { if (point[i + 1] % out_tensor_stride[i] != 0) return false; } return true; } template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ 
// body of detail::kernel_region_insert (template header in the previous
// chunk). For each valid input row, enumerate every offset of the kernel
// region and try to insert the resulting output coordinate into out_map.
// Thread x owns the output slots [x * volume, (x + 1) * volume).
void kernel_region_insert(
    size_type const num_threads,                                //
    map_type __restrict__ out_map,                              //
    coordinate_type const *const __restrict__ p_in_coordinates, //
    index_type const *const __restrict__ in_valid_row_index,    //
    coordinate_type *__restrict__ p_out_coordinates,            //
    index_type *__restrict__ out_valid_row_index,               //
    index_type *__restrict__ out_valid_map_index,               //
    gpu_kernel_region<coordinate_type> kernel,                  //
    size_type const *const __restrict__ out_tensor_stride,      //
    index_type const unused_key) {                              //
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // dynamic shared memory layout: 4 per-block vectors (tensor stride, kernel
  // size, dilation, out tensor stride) followed by one scratch coordinate per
  // thread
  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_tensor_stride = sh_size;
  size_type *sh_kernel_size   = sh_tensor_stride + coordinate_size;
  size_type *sh_dilation      = sh_kernel_size + coordinate_size;
  size_type *sh_out_tensor_stride = sh_dilation + coordinate_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_out_tensor_stride + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  // cooperatively stage the (coordinate_size - 1) entries of each vector
  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
    sh_out_tensor_stride[i] = out_tensor_stride[i];
  }

  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  // curr_coordinate aliases sh_tmp; coordinate_at() below writes into it
  coordinate<coordinate_type> curr_coordinate(sh_tmp);

  if (x < num_threads) {
    // iterate over values
    index_type out_index = x * volume;
    // set bounds for the valid keys
    for (uint32_t kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
      // compute the kernel_ind-th neighbor coordinate into sh_tmp
      sh_kernel.coordinate_at(
          kernel_ind,
          &p_in_coordinates[in_valid_row_index[x] * coordinate_size], sh_tmp);

      // Creating generative conv transpose
      if (kernel.is_transpose()) {
        // initialize out coordinate
        for (uint32_t i = 0; i < coordinate_size; ++i)
          p_out_coordinates[out_index * coordinate_size + i] =
              curr_coordinate[i];

        auto const result = out_map.insert(thrust::make_pair(
            coordinate<coordinate_type>{
                &p_out_coordinates[out_index * coordinate_size]},
            out_index));

        if (result.second) {
          // row index in the out_coordinates
          out_valid_row_index[out_index] = out_index;
          // offset in the coordinate map
          out_valid_map_index[out_index] = result.first.offset();
        } else {
          // duplicate coordinate; mark the slot invalid
          out_valid_row_index[out_index] = unused_key;
        }
        ++out_index;
      } else {
        // skip if the coordinate is not aligned
        if (!is_coordinate_aligned(sh_tmp, sh_out_tensor_stride,
                                   coordinate_size)) {
          out_valid_row_index[out_index] = unused_key;
          ++out_index;
        } else {
          // initialize out coordinate
          for (uint32_t i = 0; i < coordinate_size; ++i)
            p_out_coordinates[out_index * coordinate_size + i] =
                curr_coordinate[i];

          auto const result = out_map.insert(thrust::make_pair(
              coordinate<coordinate_type>{
                  &p_out_coordinates[out_index * coordinate_size]},
              out_index));

          if (result.second) {
            // row index in the out_coordinates
            out_valid_row_index[out_index] = out_index;
            // offset in the coordinate map
            out_valid_map_index[out_index] = result.first.offset();
          } else {
            out_valid_row_index[out_index] = unused_key;
          }
          ++out_index;
        }
      }
    }
  }
}

} // namespace detail

/*
 * @brief generate a region strided coordinate map
 *
 * @return a gpu_coordinate_map
 */
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_region(
    cpu_kernel_region<coordinate_type> &kernel,
    stride_type const &out_tensor_stride) const {
  ASSERT(m_coordinate_size == kernel.coordinate_size(),
         "Invalid kernel coordinate_size");
  gpu_kernel_region<coordinate_type> gpu_kernel(kernel.to_gpu());

  // Over estimate the reserve size to be size();
  size_type const N_in = size();
  size_type const
  // continuation of stride_region() (declaration of N_out started in the
  // previous chunk): reserve one candidate output per (input row, kernel
  // offset) pair.
      N_out = N_in * kernel.volume();
  LOG_DEBUG("Stride region out tensor stride:", out_tensor_stride,
            "with capacity:", N_out);
  self_type stride_map(N_out, m_coordinate_size, m_hashtable_occupancy,
                       out_tensor_stride, m_map_allocator,
                       base_type::m_byte_allocator);

  index_storage_type d_out_tensor_stride(out_tensor_stride);

  auto &out_valid_row_index = stride_map.m_valid_row_index;
  auto &out_valid_map_index = stride_map.m_valid_map_index;

  out_valid_row_index.resize(N_out);
  out_valid_map_index.resize(N_out);

  index_type const unused_key = std::numeric_limits<index_type>::max();
  // (THREAD * D + 4 * D) * 4 bytes: four D-sized vectors (tensor stride,
  // kernel size, dilation, out tensor stride) plus one scratch coordinate per
  // thread -- must match the layout in kernel_region_insert.
  uint32_t const shared_memory_size_in_bytes =
      4 * m_coordinate_size * sizeof(index_type) + // 4 stride/kernel vectors
      CUDA_NUM_THREADS * m_coordinate_size * sizeof(coordinate_type); // tmp
  detail::kernel_region_insert<coordinate_type, size_type, index_type, map_type>
      <<<GET_BLOCKS(N_in, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
         shared_memory_size_in_bytes>>>(N_in,                         //
                                        *stride_map.m_map,            //
                                        const_coordinate_data(),      //
                                        m_valid_row_index.cbegin(),   //
                                        stride_map.coordinate_data(), //
                                        out_valid_row_index.data(),   //
                                        out_valid_map_index.data(),   //
                                        gpu_kernel,                   //
                                        d_out_tensor_stride.cbegin(), //
                                        unused_key);                  //
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("kernel_region_insert done");
  // LOG_DEBUG("valid row index", out_valid_row_index);
  // LOG_DEBUG("valid map offset", out_valid_map_index);

  // remove unused_keys
  auto valid_begin = thrust::make_zip_iterator(
      thrust::make_tuple(out_valid_row_index.begin(), //
                         out_valid_map_index.begin()));
  size_type const number_of_valid =
      thrust::remove_if(thrust::device, //
                        valid_begin,    //
                        thrust::make_zip_iterator(
                            thrust::make_tuple(out_valid_row_index.end(), //
                                               out_valid_map_index.end())),
                        detail::is_first<index_type>(unused_key)) -
      valid_begin;
  out_valid_row_index.resize(number_of_valid);
  out_valid_map_index.resize(number_of_valid);
  stride_map.m_size = number_of_valid;
  LOG_DEBUG("Reduced to", number_of_valid);

  // remap values: rewrite each surviving entry's mapped value to its
  // compacted position
  thrust::counting_iterator<index_type> count_begin{0};
  thrust::for_each(count_begin, count_begin + number_of_valid,
                   detail::update_value_with_offset<index_type, map_type>{
                       *stride_map.m_map, out_valid_map_index.data()});

  LOG_DEBUG("Stride remap done");
  return stride_map;
}

namespace detail {

// One scalar per thread. stride_src=true gathers column 0 of the rows
// selected by src_valid_row_index into a dense array; stride_src=false
// scatters a dense array into column 0 of row src_valid_row_index[x]... or
// row x, see the index expressions below.
template <typename dst_coordinate_type, typename src_coordinate_type,
          typename size_type, typename index_type, bool stride_src>
__global__ void copy_column_with_valid(
    dst_coordinate_type *__restrict__ dst_coordinates,       //
    size_type const num_threads,                             //
    src_coordinate_type const *__restrict__ src_coordinates, //
    index_type const *__restrict__ src_valid_row_index,      //
    size_type const coordinate_size) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    if (stride_src)
      dst_coordinates[x] =
          src_coordinates[src_valid_row_index[x] * coordinate_size];
    else
      dst_coordinates[x * coordinate_size] =
          src_coordinates[src_valid_row_index[x]];
  }
}

// Same column copy without the valid-row indirection; note the implicit
// src -> dst type conversion when the template types differ.
template <typename dst_coordinate_type, typename src_coordinate_type,
          typename size_type, bool stride_src>
__global__ void
copy_column(dst_coordinate_type *__restrict__ dst_coordinates,       //
            size_type const num_threads,                             //
            src_coordinate_type const *__restrict__ src_coordinates, //
            size_type const coordinate_size) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    if (stride_src)
      dst_coordinates[x] = src_coordinates[x * coordinate_size];
    else
      dst_coordinates[x * coordinate_size] = src_coordinates[x];
  }
}

} // namespace detail

// Builds the "origin" map: one coordinate per unique batch index with all
// spatial components zero; the body continues in the next chunk.
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin() const {
  size_type const N = size();
  LOG_DEBUG("Origin map from in map size:", N);
  // tensor stride is set to {0,..., 0} for the origin map.
  // continuation of CoordinateMapGPU::origin(): zero tensor stride
  stride_type origin_tensor_stride(m_coordinate_size - 1);
  std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(),
                [](auto &i) { i = 0; });

  // thrust unique for unique batch index
  coordinate_type *d_batch_indices = reinterpret_cast<coordinate_type *>(
      m_byte_allocator.allocate(N * sizeof(coordinate_type)));
  // gather column 0 (the batch index) of every valid row
  detail::copy_column_with_valid<coordinate_type, coordinate_type, size_type,
                                 index_type, true>
      <<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
          d_batch_indices, N, const_coordinate_data(),
          m_valid_row_index.cbegin(), m_coordinate_size);

#ifdef DEBUG
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("copied batch indices");
#endif

  // Sort and unique
  thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N);
#ifdef DEBUG
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("sorted batch indices");
#endif
  auto d_batch_indices_end =
      thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N);
  size_type const N_unique = d_batch_indices_end - d_batch_indices;
#ifdef DEBUG
  size_t Nsize = std::min<int>(N_unique, 100);
  std::vector<coordinate_type> tmp(Nsize);
  CUDA_CHECK(cudaMemcpy(tmp.data(), d_batch_indices,
                        Nsize * sizeof(coordinate_type),
                        cudaMemcpyDeviceToHost));
  LOG_DEBUG("sort and unique batch", tmp);
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("unique done");
#endif

  // Create origin map
  LOG_DEBUG("Origin map with size:", N_unique,
            " tensor stride:", origin_tensor_stride);
  self_type origin_map(N_unique, m_coordinate_size, m_hashtable_occupancy,
                       origin_tensor_stride, m_map_allocator,
                       base_type::m_byte_allocator);
  // zero all coordinates, then scatter the unique batch indices into column 0
  CUDA_CHECK(
      cudaMemset(origin_map.coordinate_data(), 0,
                 N_unique * m_coordinate_size * sizeof(coordinate_type)));
  detail::copy_column<coordinate_type, coordinate_type, size_type, false>
      <<<GET_BLOCKS(N_unique, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
          origin_map.coordinate_data(), N_unique, d_batch_indices,
          m_coordinate_size);
#ifdef DEBUG
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("copied batch indices to the origin_map");
#endif

  auto &origin_valid_row_index = origin_map.m_valid_row_index;
  auto &origin_valid_map_index = origin_map.m_valid_map_index;

  origin_valid_row_index.resize(N_unique);
  origin_valid_map_index.resize(N_unique);
  origin_map.m_size = N_unique;

  // Insert coordinates
  auto insert =
      detail::insert_coordinate<coordinate_type, map_type, index_type *>{
          *origin_map.m_map,                  // map
          origin_map.const_coordinate_data(), // coordinates,
          origin_valid_row_index.data(),      // valid row
          origin_valid_map_index.data(),      // iter offset
          m_coordinate_size};

  thrust::counting_iterator<uint32_t> count_begin{0};
  thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert);

#ifdef DEBUG
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("origin map insertion");
#endif

  m_byte_allocator.deallocate((char *)d_batch_indices,
                              N * sizeof(coordinate_type));

  return origin_map;
}

// CoordinateFieldMapGPU::origin(): origin map built from (float) field
// coordinates, returning an integer coordinate map; the body continues in the
// next chunk.
template <typename coordinate_type, typename coordinate_int_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_int_type, TemplatedAllocator>
CoordinateFieldMapGPU<coordinate_type, coordinate_int_type,
                      TemplatedAllocator>::origin() const {
  size_type const N = size();
  LOG_DEBUG("Origin map from in map size:", N);
  // tensor stride is set to {0,..., 0} for the origin map.
stride_type origin_tensor_stride(m_coordinate_size - 1); std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(), [](auto &i) { i = 0; }); // thrust unique for unique batch index coordinate_int_type *d_batch_indices = reinterpret_cast<coordinate_int_type *>( m_byte_allocator.allocate(N * sizeof(coordinate_int_type))); detail::copy_column<coordinate_int_type, coordinate_type, size_type, true> <<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>( d_batch_indices, N, const_coordinate_data(), m_coordinate_size); // Sort and unique thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N); auto d_batch_indices_end = thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N); size_type const N_unique = d_batch_indices_end - d_batch_indices; // Create origin map LOG_DEBUG("Origin map with size:", N_unique, " tensor stride:", origin_tensor_stride); CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> origin_map( N_unique, m_coordinate_size, 50, origin_tensor_stride); CUDA_CHECK( cudaMemset(origin_map.coordinate_data(), 0, N_unique * m_coordinate_size * sizeof(coordinate_int_type))); detail::copy_column<coordinate_int_type, coordinate_int_type, size_type, false> <<<GET_BLOCKS(N_unique, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>( origin_map.coordinate_data(), N_unique, d_batch_indices, m_coordinate_size); m_byte_allocator.deallocate((char *)d_batch_indices, N * sizeof(coordinate_type)); origin_map.initialize_valid_indices(N_unique); return origin_map; } namespace detail { template <typename coordinate_field_type, // typename coordinate_int_type, // typename size_type, // typename index_type, // typename map_type> __global__ void origin_field_map_kernel( size_type const num_threads, // coordinate_field_type const *__restrict__ d_field_coords, // map_type const __restrict__ origin_map, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, // index_type *__restrict__ p_kernels, // size_type const coordinate_size) { 
extern __shared__ coordinate_int_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; // clang-format off coordinate_int_type *sh_tmp = sh_all + tx * coordinate_size; // clang-format on if (x < num_threads) for (index_type i = 0; i < coordinate_size; ++i) sh_tmp[i] = 0; __syncthreads(); if (x < num_threads) { sh_tmp[0] = coordinate_int_type(lroundf(d_field_coords[x * coordinate_size])); auto origin_iter = origin_map.find(coordinate<coordinate_int_type>(sh_tmp)); auto out_index = origin_iter->second; p_in_maps[x] = x; p_out_maps[x] = out_index; // origin_map row index // For kernel_map decompose() p_kernels[x] = out_index; } } } // namespace detail template <typename coordinate_field_type, typename coordinate_int_type, template <typename T> class TemplatedAllocator> CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>::kernel_map_type CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type, TemplatedAllocator>:: origin_map(CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> const &origin_map, uint32_t thread_dim) const { ASSERT(std::all_of(origin_map.get_tensor_stride().begin(), origin_map.get_tensor_stride().end(), [](auto const &i) { return i == 0; }), "Invalid origin tensor stride", origin_map.get_tensor_stride()); // reserve size(); size_type const in_size = size(); LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size()); // (THREAD * D) * 4 uint32_t const shared_memory_size_in_bytes = thread_dim * m_coordinate_size * sizeof(coordinate_int_type); // tmp size_type const num_threads = in_size; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("origin_map num block", num_blocks); LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("origin_map threads dim", thread_dim); LOG_DEBUG("origin_map num threads", num_threads); kernel_map_type kernel_map(in_size, base_type::m_byte_allocator); 
  // continuation of CoordinateFieldMapGPU::origin_map()
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("Allocated kernel_map.");

  detail::origin_field_map_kernel<coordinate_field_type, coordinate_int_type,
                                  size_type, index_type, int_hash_map_type>
      <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
          num_threads,                 //
          const_coordinate_data(),     //
          origin_map.const_hash_map(), //
          kernel_map.in_maps.begin(),  //
          kernel_map.out_maps.begin(), //
          kernel_map.kernels.begin(),  //
          m_coordinate_size);
  CUDA_CHECK(cudaStreamSynchronize(0));

  kernel_map.decompose();
  LOG_DEBUG("origin map decomposed");

  return kernel_map;
}

namespace detail {

// For each retained input row (keep_begin[x] true), copy its coordinate into
// the compacted output row derived from the inclusive scan of the keep flags
// and insert it into out_map; failures are flagged with unused_map_offset.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void prune_copy_and_insert(
    size_type const num_threads,                              //
    size_type const coordinate_size,                          //
    index_type const unused_map_offset,                       //
    index_type const *const __restrict__ in_valid_row_index,  //
    coordinate_type const *const __restrict__ in_coordinates, //
    bool const *const __restrict__ keep_begin,                //
    index_type const *const __restrict__ inclusive_scan_keep, //
    map_type __restrict__ out_map,                            //
    coordinate_type *__restrict__ out_coordinates,            //
    index_type *__restrict__ out_valid_row_index,             //
    index_type *__restrict__ out_valid_map_offset             //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    if (!keep_begin[x]) {
      out_valid_map_offset[x] = unused_map_offset;
    } else {
      // If keep,
      // compacted row = number of kept rows before x (exclusive-scan value,
      // i.e. inclusive scan shifted by one)
      auto out_row_index = (x < 1) ? 0 : inclusive_scan_keep[x - 1];
      coordinate_type const *curr_in_coord =
          &in_coordinates[in_valid_row_index[x] * coordinate_size];
      coordinate_type *curr_out_coord =
          &out_coordinates[out_row_index * coordinate_size];
      for (index_type i = 0; i < coordinate_size; ++i)
        curr_out_coord[i] = curr_in_coord[i];

      // insert to the out_map
      auto coord = coordinate<coordinate_type>{curr_out_coord};
      // remap the value in the next kernel call
      auto result = out_map.insert(thrust::make_pair(coord, 0));
      out_valid_row_index[x] = out_row_index;
      if (result.second)
        out_valid_map_offset[x] = result.first.offset();
      else
        out_valid_map_offset[x] = unused_map_offset;
    }
  }
}

// Rewrites the mapped value at each surviving map offset to its compacted
// index x (second pass after the insertions above).
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void remap(size_type const num_threads,                  //
                      map_type const __restrict__ out_map,          //
                      index_type *__restrict__ out_valid_map_offset //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    auto &pair = out_map.data()[out_valid_map_offset[x]];
    pair.second = x;
  }
}

// Element-wise converting copy (used below to widen bool -> index_type).
template <typename Dtype, typename Stype>
__global__ void typed_copy(uint32_t const num_threads,   //
                           Dtype *__restrict__ dst,      //
                           Stype const *__restrict__ src //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    dst[x] = src[x];
  }
}

} // namespace detail

// Creates a new map containing only the rows whose keep flag is true;
// the body continues in the next chunk.
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::prune(
    bool const *keep_begin, bool const *keep_end) const {
  size_type const N = size();
  ASSERT(N == keep_end - keep_begin, "Invalid keep size");
  LOG_DEBUG("Prune size:", N);

  // exclusive sum for coordinate copy.
  // continuation of prune()
  auto const inclusive_scan_size = N * sizeof(index_type);
  index_type *d_inclusive_scan =
      (index_type *)m_byte_allocator.allocate(inclusive_scan_size);
  // bool -> index_type
  detail::typed_copy<<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
      N, d_inclusive_scan, keep_begin);
  CUDA_CHECK(cudaStreamSynchronize(0));
  thrust::inclusive_scan(thrust::device, d_inclusive_scan, d_inclusive_scan + N,
                         d_inclusive_scan);
  index_type N_pruned;
  // the last scan element equals the total number of kept rows
  CUDA_CHECK(cudaMemcpy(&N_pruned, d_inclusive_scan + N - 1, sizeof(index_type),
                        cudaMemcpyDeviceToHost));
  LOG_DEBUG("Pruned N:", N_pruned);

  // create a coordinate_map
  self_type pruned_map(N, m_coordinate_size, m_hashtable_occupancy,
                       base_type::m_tensor_stride, m_map_allocator,
                       base_type::m_byte_allocator);

  // Copy and insert kernel that first checks keep[i] is true and insert at
  // inclusive_scan[i - 1].
  auto &out_valid_map_offset = pruned_map.m_valid_map_index;
  auto &out_valid_row_index = pruned_map.m_valid_row_index;
  out_valid_map_offset.resize(N);
  out_valid_row_index.resize(N);

  index_type const unused_map_offset = std::numeric_limits<index_type>::max();
  detail::prune_copy_and_insert<coordinate_type, size_type, index_type,
                                map_type>
      <<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
          N, m_coordinate_size, unused_map_offset, m_valid_row_index.cbegin(),
          const_coordinate_data(), keep_begin, d_inclusive_scan,
          *(pruned_map.m_map), pruned_map.coordinate_data(),
          out_valid_row_index.data(), out_valid_map_offset.data());
  CUDA_CHECK(cudaStreamSynchronize(0));
  LOG_DEBUG("Pruned hash map size:", pruned_map.size());

  // Remove not inserted rows
  auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(
      out_valid_map_offset.begin(), out_valid_row_index.begin()));
  size_type const number_of_valid =
      thrust::remove_if(
          thrust::device, valid_begin,
          thrust::make_zip_iterator(thrust::make_tuple(
              out_valid_map_offset.end(), out_valid_row_index.end())),
          detail::is_first<index_type>(unused_map_offset)) -
      valid_begin;
  LOG_DEBUG("number of valid rows:", number_of_valid);
  out_valid_map_offset.resize(number_of_valid);
  out_valid_row_index.resize(number_of_valid);
  pruned_map.m_size = number_of_valid;

  // remap the final map values
  detail::remap<coordinate_type, size_type, index_type, map_type>
      <<<GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
          number_of_valid, *(pruned_map.m_map), out_valid_map_offset.data());
  CUDA_CHECK(cudaStreamSynchronize(0));

  m_byte_allocator.deallocate((char *)d_inclusive_scan, inclusive_scan_size);

  return pruned_map;
}

// Merge

namespace detail {

// Copies the key coordinate stored at each given map offset into the row
// addressed by that entry's mapped value.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
copy_coordinates_by_offset(map_type __restrict__ map,                  //
                           coordinate_type *__restrict__ coordinates,  //
                           index_type const *__restrict__ map_offsets, //
                           size_type const num_threads,                //
                           size_type const coordinate_size             //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    typename map_type::value_type const *p_value = map.data() + map_offsets[x];
    // Compute Capabilities 3.5 or newer
    coordinate_type *dst_coordinate =
        coordinates + p_value->second * coordinate_size;
    for (index_type i = 0; i < coordinate_size; ++i)
      dst_coordinate[i] = p_value->first[i];
  }
}

// Gathers valid rows into a dense coordinate matrix, one thread per scalar
// (num_threads = rows * coordinate_size); continues in the next chunk.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void copy_coordinates_by_valid_row(
    // map_type __restrict__ map,                           //
    coordinate_type const *__restrict__ in_coordinates, //
    coordinate_type *__restrict__ out_coordinates,      //
    index_type const *__restrict__ valid_row,           //
    size_type const num_threads,                        //
    size_type const coordinate_size                     //
) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    // Compute Capabilities 3.5 or newer
    index_type const row_index = x / coordinate_size;
    index_type const col_index = x % coordinate_size;
    out_coordinates[row_index *
    // continuation of copy_coordinates_by_valid_row: copy one scalar
                    coordinate_size +
                    col_index] =
        in_coordinates[valid_row[row_index] * coordinate_size + col_index];
  }
}

// Inserts row x of `coordinates`; on success records
// valid_row_index[x] = x + coordinate_row_offset and the map offset, on
// failure (duplicate key) flags valid_map_index[x] with unused_key.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void insert_and_map_kernel_with_offset(
    map_type __restrict__ map,                       //
    coordinate_type const *__restrict__ coordinates, //
    index_type const coordinate_row_offset,          //
    index_type *__restrict__ valid_map_index,        //
    index_type *__restrict__ valid_row_index,        //
    size_type const num_threads,                     //
    size_type const coordinate_size,                 //
    index_type const unused_key) {
  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  if (x < num_threads) {
    // m_map.insert(pair);
    // Returns pair<iterator, (bool)insert_success>
    auto const result = map.insert(thrust::make_pair(
        coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));

    if (result.second) {
      valid_row_index[x] = x + coordinate_row_offset;
      // success map index. remove failed insertion with success.
      valid_map_index[x] = result.first.offset();
    } else {
      valid_map_index[x] = unused_key;
    }
  }
}

} // namespace detail

// Concatenates several maps into one, dropping duplicate coordinates;
// the body continues in the next chunk.
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::merge(
    std::vector<std::reference_wrapper<self_type>> const &maps) const {
  // reserve size
  size_t all_size = std::accumulate(
      maps.begin(), maps.end(), 0,
      [](size_t sum, const self_type &map) { return sum + map.size(); });
  LOG_DEBUG("Out merge map capacity:", all_size);
  self_type merged_map(all_size, m_coordinate_size, m_hashtable_occupancy,
                       base_type::m_tensor_stride, m_map_allocator,
                       base_type::m_byte_allocator);
  merged_map.m_valid_row_index.resize(all_size);
  merged_map.m_valid_map_index.resize(all_size);

  // Copy valid coordinates to the merged map; these pointers advance as each
  // source map is appended.
  coordinate_type *curr_coordinates = merged_map.coordinate_data();
  index_type *curr_valid_map_offset = merged_map.m_valid_map_index.data();
  index_type *curr_valid_row_index = merged_map.m_valid_row_index.data();

  index_type const unused_key = std::numeric_limits<index_type>::max();
  index_type row_offset{0};
  for (self_type const &map : maps) {
    size_type const num_threads = map.size();
    if (num_threads == 0)
      continue;
    size_type const num_blocks =
        GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS);
    LOG_DEBUG("Current merge map size:", num_threads);

    // dense-copy the source map's valid rows, then insert them with a global
    // row offset
    detail::copy_coordinates_by_valid_row<coordinate_type, size_type,
                                          index_type, map_type>
        <<<num_blocks, CUDA_NUM_THREADS>>>(map.const_coordinate_data(),     //
                                           curr_coordinates,                //
                                           map.m_valid_row_index.cdata(),   //
                                           num_threads * m_coordinate_size, //
                                           m_coordinate_size);

    detail::insert_and_map_kernel_with_offset<coordinate_type, size_type,
                                              index_type, map_type>
        <<<num_blocks, CUDA_NUM_THREADS>>>(*(merged_map.m_map),
                                           curr_coordinates,      //
                                           row_offset,            //
                                           curr_valid_map_offset, //
                                           curr_valid_row_index,  //
                                           num_threads, m_coordinate_size,
                                           unused_key);
    // continuation of merge(): advance the running output cursors
    CUDA_CHECK(cudaStreamSynchronize(0));
    curr_coordinates += num_threads * m_coordinate_size;
    curr_valid_map_offset += num_threads;
    curr_valid_row_index += num_threads;
    row_offset += num_threads;
  }

  // Remove invalid maps
  auto valid_begin = thrust::make_zip_iterator(
      thrust::make_tuple(merged_map.m_valid_map_index.begin(),
                         merged_map.m_valid_row_index.begin()));

  size_type const number_of_valid =
      thrust::remove_if(thrust::device, valid_begin,
                        thrust::make_zip_iterator(thrust::make_tuple(
                            merged_map.m_valid_map_index.end(),
                            merged_map.m_valid_row_index.end())),
                        detail::is_first<index_type>(unused_key)) -
      valid_begin;

  // remap the final map row index and the map offset
  detail::remap<coordinate_type, size_type, index_type, map_type>
      <<<GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
          number_of_valid, *(merged_map.m_map),
          merged_map.m_valid_map_index.data());

  merged_map.m_valid_row_index.resize(number_of_valid);
  merged_map.m_valid_map_index.resize(number_of_valid);
  merged_map.m_size = number_of_valid;

  return merged_map;
}

namespace detail {

// For each valid output entry, counts how many kernel-region neighbors exist
// in in_map; used to size the kernel map before filling it.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
count_kernel(map_type const __restrict__ in_map,                       //
             map_type const __restrict__ out_map,                      //
             index_type const *const __restrict__ out_valid_map_index, //
             size_type const num_threads,                              //
             gpu_kernel_region<coordinate_type> kernel,                //
             index_type *__restrict__ p_count_per_thread) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // dynamic shared memory: 3 per-block vectors plus one scratch coordinate
  // per thread
  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_tensor_stride = sh_size;
  size_type *sh_kernel_size   = sh_tensor_stride + coordinate_size;
  size_type *sh_dilation      = sh_kernel_size + coordinate_size;

  coordinate_type *sh_coordinate =
      reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  auto const equal = out_map.get_key_equal();

  // kernel_maps
  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
  }

  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  // `point` aliases sh_tmp; coordinate_at() below writes into it
  coordinate<coordinate_type> point(sh_tmp);
  auto const unused_key = out_map.get_unused_key();
  if (x < num_threads) {
    size_type count = 0;
    typename map_type::value_type const &out_value =
        out_map.data()[out_valid_map_index[x]];
    // valid_index guarantees that it contains a valid value
    if (!equal(out_value.first, unused_key)) {
      for (auto kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
        sh_kernel.coordinate_at(kernel_ind, out_value.first.data(), sh_tmp);
        if (in_map.find(point) != in_map.end()) {
          ++count;
        }
      }
    }
    p_count_per_thread[x] = count;
  }
}

// Fills the kernel-map triples at the per-thread offsets given by the
// inclusive cumsum of count_kernel's results; continues in the next chunk.
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void preallocated_kernel_map_iteration(
    map_type const __restrict__ in_map,                                     //
    map_type const __restrict__ out_map,                                    //
    index_type const *const __restrict__ out_valid_map_index,               //
    size_type const num_threads,                                            //
    gpu_kernel_region<coordinate_type> kernel,                              //
    index_type const *const __restrict__ inclusive_count_cumsum_per_thread, //
    index_type *__restrict__ p_kernels,                                     //
    index_type *__restrict__ p_in_maps,                                     //
    index_type *__restrict__ p_out_maps) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // clang-format off
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type
*sh_tensor_stride = sh_size; size_type *sh_kernel_size = sh_tensor_stride + coordinate_size; size_type *sh_dilation = sh_kernel_size + coordinate_size; coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size); coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size; // clang-format on auto const equal = out_map.get_key_equal(); for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = kernel.tensor_stride()[i]; sh_kernel_size[i] = kernel.kernel_size()[i]; sh_dilation[i] = kernel.dilation()[i]; } __syncthreads(); auto sh_kernel = gpu_kernel_region<coordinate_type>( kernel, sh_tensor_stride, sh_kernel_size, sh_dilation); coordinate<coordinate_type> curr_coordinate(sh_tmp); auto const unused_key = out_map.get_unused_key(); if (x < num_threads) { // iterate over values auto kernel_map_index = (x < 1) ? 0 : inclusive_count_cumsum_per_thread[x - 1]; typename map_type::value_type const &out_value = out_map.data()[out_valid_map_index[x]]; if (!equal(out_value.first, unused_key)) { // set bounds for the valid keys for (uint32_t kernel_index = 0; kernel_index < volume; ++kernel_index) { sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp); auto const &in_result = in_map.find(curr_coordinate); if (in_result != in_map.end()) { // insert to p_kernels[kernel_map_index] = kernel_index; p_in_maps[kernel_map_index] = (*in_result).second; p_out_maps[kernel_map_index] = out_value.second; ++kernel_map_index; } } } } } template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void direct_in_out_map(size_type const num_threads, // map_type const __restrict__ in_map, // map_type const __restrict__ out_map, // index_type const *const __restrict__ out_valid_map_offset, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, index_type const unused_key) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; 
auto const x = blockDim.x * bx + tx;

if (x < num_threads) {
  // One thread per valid out-map slot: fetch the out entry, probe the
  // in-map with the identical key (kernel_volume == 1 fast path).
  typename map_type::value_type const &out_value =
      out_map.data()[out_valid_map_offset[x]];
  auto const &result = in_map.find(out_value.first);
  if (result != in_map.end()) {
    p_in_maps[x] = (*result).second;
    p_out_maps[x] = out_value.second;
  } else {
    // Mark the miss with the sentinel; the host side compacts these rows
    // away afterwards (p_out_maps is left unwritten for misses).
    p_in_maps[x] = unused_key;
  }
}
}

// Kernel: one thread per (out entry, kernel offset) pair. Each thread shifts
// the out coordinate by its kernel offset, probes in_map, and records the
// (kernel index, in row, out row) triplet, or unused_map_value on a miss.
// Dynamic shared memory holds the region parameters plus one scratch
// coordinate per thread (sized by the host launcher).
template <typename coordinate_type, //
          typename size_type,       //
          typename index_type,      //
          typename map_type>
__global__ void
direct_kernel_map(map_type const __restrict__ in_map,  //
                  map_type const __restrict__ out_map, //
                  index_type const *const __restrict__ out_valid_map_index, //
                  size_type const num_threads,               //
                  gpu_kernel_region<coordinate_type> kernel, //
                  index_type *__restrict__ p_kernels,        //
                  index_type *__restrict__ p_in_maps,        //
                  index_type *__restrict__ p_out_maps,
                  index_type const unused_map_value) {
  extern __shared__ coordinate_type sh_all[];

  auto const tx = threadIdx.x;
  auto const bx = blockIdx.x;
  auto const x = blockDim.x * bx + tx;

  size_type const coordinate_size = kernel.coordinate_size();
  size_type const volume = kernel.volume();

  // clang-format off
  // Shared layout: [tensor_stride | kernel_size | dilation | per-thread tmp coordinates]
  size_type *sh_size = reinterpret_cast<size_type *>(sh_all);

  size_type *sh_tensor_stride = sh_size;
  size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
  size_type *sh_dilation = sh_kernel_size + coordinate_size;

  coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
  coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
  // clang-format on

  auto const equal = out_map.get_key_equal();

  // Stage the region parameters in shared memory; only the spatial entries
  // (coordinate_size - 1 of them) exist, hence the loop bound.
  for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
    sh_tensor_stride[i] = kernel.tensor_stride()[i];
    sh_kernel_size[i] = kernel.kernel_size()[i];
    sh_dilation[i] = kernel.dilation()[i];
  }

  __syncthreads();

  auto sh_kernel = gpu_kernel_region<coordinate_type>(
      kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);

  auto const unused_key = out_map.get_unused_key();
  if (x < num_threads) { // iterate over values
    index_type
kernel_index = x % volume; typename map_type::value_type const &out_value = out_map.data()[out_valid_map_index[x / volume]]; if (!equal(out_value.first, unused_key)) { // set bounds for the valid keys // TODO: copy the curr_coordinate to sh_curr_coordinate sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp); auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp)); if (in_result != in_map.end()) { // insert to p_kernels[x] = kernel_index; p_in_maps[x] = (*in_result).second; p_out_maps[x] = out_value.second; } else { p_kernels[x] = unused_map_value; } } } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map( self_type const &out_map, gpu_kernel_region<coordinate_type> const &kernel, CUDAKernelMapMode::Mode kernel_map_mode, uint32_t thread_dim) const { // Over estimate the reserve size to be size(); size_type const out_size = out_map.size(); size_type const kernel_volume = kernel.volume(); ASSERT(kernel_volume > 0, "Invalid kernel"); if (kernel_volume == 1) { // directly iterate over all output first by finding all in out map. 
auto const N = out_size; LOG_DEBUG("out_map size:", N); index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate( 2 * (N + 1) * sizeof(index_type)); index_type *ins = in_out_map; index_type *outs = in_out_map + N + 1; // for __restrict__ collision prevention index_type unused_key = std::numeric_limits<index_type>::max(); detail::direct_in_out_map<coordinate_type, size_type, index_type, map_type> <<<GET_BLOCKS(N, thread_dim), thread_dim>>>( N, *m_map, // *(out_map.m_map), // out_map.m_valid_map_index.cdata(), // ins, // in map outs, // out map unused_key); LOG_DEBUG("Direct in out map copy done"); auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs)); auto const valid_size = thrust::remove_if( thrust::device, begin, thrust::make_zip_iterator(thrust::make_tuple(ins + N, outs + N)), detail::is_first<index_type>(unused_key)) - begin; LOG_DEBUG("Valid size:", valid_size); kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false); CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), ins, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), outs, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); base_type::m_byte_allocator.deallocate((char *)in_out_map, 2 * (N + 1) * sizeof(index_type)); LOG_DEBUG("Cleaning up"); return kernel_map; } else if (kernel_map_mode == CUDAKernelMapMode::MEMORY_EFFICIENT && kernel.region_type() != RegionType::CUSTOM) { // (THREAD * D + 3 * D) * 4 uint32_t const shared_memory_size_in_bytes = 3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp // clang-format on size_type const num_threads = out_size; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("num block", num_blocks); LOG_DEBUG("out_map size", out_map.size()); LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("threads dim", thread_dim); 
LOG_DEBUG("num threads", num_threads); index_type *d_p_count_per_thread = reinterpret_cast<index_type *>( base_type::m_byte_allocator.allocate(num_threads * sizeof(index_type))); // Initialize count per thread detail::count_kernel<coordinate_type, size_type, index_type, map_type> <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>( *m_map, // *out_map.m_map, // out_map.m_valid_map_index.cbegin(), // num_threads, // kernel, // d_p_count_per_thread); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("count_kernel finished"); thrust::inclusive_scan(thrust::device, d_p_count_per_thread, d_p_count_per_thread + num_threads, d_p_count_per_thread); index_type num_kernel_map; // type following the kernel map allocator CUDA_CHECK(cudaMemcpy(&num_kernel_map, d_p_count_per_thread + num_threads - 1, sizeof(index_type), cudaMemcpyDeviceToHost)); // set kernel map LOG_DEBUG("Found", num_kernel_map, "kernel map elements."); kernel_map_type kernel_map(num_kernel_map, base_type::m_byte_allocator); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("Allocated kernel_map."); detail::preallocated_kernel_map_iteration<coordinate_type, size_type, index_type, map_type> <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>( *m_map, // *out_map.m_map, // out_map.m_valid_map_index.cbegin(), // num_threads, // kernel, // d_p_count_per_thread, // kernel_map.kernels.begin(), // kernel_map.in_maps.begin(), // kernel_map.out_maps.begin()); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("Preallocated kernel map done"); kernel_map.decompose(); base_type::m_byte_allocator.deallocate( reinterpret_cast<char *>(d_p_count_per_thread), num_threads * sizeof(index_type)); LOG_DEBUG("cudaFree"); return kernel_map; } else if (kernel_map_mode == CUDAKernelMapMode::SPEED_OPTIMIZED && kernel.region_type() != RegionType::CUSTOM) { // (THREAD * 3 * D + 3 * D) * 4 uint32_t const shared_memory_size_in_bytes = 3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation (thread_dim + (thread_dim + 
kernel_volume - 1) / kernel_volume) * m_coordinate_size * sizeof(coordinate_type); // tmp coordinate + current coordinate size_type const num_threads = out_size * kernel_volume; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("num block", num_blocks); LOG_DEBUG("out_map size", out_map.size()); LOG_DEBUG("kernel_volume", kernel_volume); LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("threads dim", thread_dim); LOG_DEBUG("num threads", num_threads); index_type unused_map_value = std::numeric_limits<index_type>::max(); index_type *d_p_valid_in_index = reinterpret_cast<index_type *>(base_type::m_byte_allocator.allocate( 3 * (num_threads + 1) * sizeof(index_type))); index_type *d_p_valid_out_index = d_p_valid_in_index + num_threads + 1; index_type *d_p_valid_kernel_index = d_p_valid_out_index + num_threads + 1; // Initialize count per thread detail::direct_kernel_map<coordinate_type, size_type, index_type, map_type> <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>( *m_map, // *out_map.m_map, // out_map.m_valid_map_index.cbegin(), // num_threads, // kernel, // d_p_valid_kernel_index, // d_p_valid_in_index, // d_p_valid_out_index, // unused_map_value); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("direct_kernel_map finished"); auto begin = thrust::make_zip_iterator(thrust::make_tuple( d_p_valid_kernel_index, d_p_valid_in_index, d_p_valid_out_index)); auto const valid_size = thrust::remove_if(thrust::device, begin, thrust::make_zip_iterator(thrust::make_tuple( d_p_valid_kernel_index + num_threads, d_p_valid_in_index + num_threads, d_p_valid_out_index + num_threads)), detail::is_first<index_type>(unused_map_value)) - begin; LOG_DEBUG("Valid size:", valid_size); kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator); CUDA_CHECK(cudaMemcpy(kernel_map.kernels.data(), d_p_valid_kernel_index, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), 
d_p_valid_in_index, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), d_p_valid_out_index, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); kernel_map.decompose(); base_type::m_byte_allocator.deallocate( reinterpret_cast<char *>(d_p_valid_in_index), 3 * (num_threads + 1) * sizeof(index_type)); LOG_DEBUG("cudaFree"); return kernel_map; } else { // kernel volume == 1 ASSERT(false, "Not implemented"); } } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void stride_map_kernel(map_type const __restrict__ in_map, // map_type const __restrict__ out_map, // index_type const *const __restrict__ in_valid_map_index, // size_type const num_threads, // index_type const *const __restrict__ stride, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, size_type const coordinate_size, index_type const unused_key) { extern __shared__ coordinate_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; // clang-format off size_type *sh_size = reinterpret_cast<size_type *>(sh_all); size_type *sh_stride = sh_size; coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_size + coordinate_size); coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size; // clang-format on for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_stride[i] = stride[i]; } __syncthreads(); if (x >= num_threads) return; typename map_type::value_type const &in_value = in_map.data()[in_valid_map_index[x]]; sh_tmp[0] = in_value.first[0]; for (index_type j = 1; j < coordinate_size; ++j) { sh_tmp[j] = (__float2int_rd(__fdiv_rd(in_value.first[j], sh_stride[j - 1]))) * sh_stride[j - 1]; } auto out_iter = out_map.find(coordinate<coordinate_type>(sh_tmp)); if (out_iter == out_map.end()) { p_in_maps[x] = unused_key; } else { p_in_maps[x] = 
in_value.second; p_out_maps[x] = out_iter->second; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_map( self_type const &out_map, stride_type const &out_tensor_stride, uint32_t thread_dim) const { // Over estimate the reserve size to be size(); size_type const in_size = size(); index_storage_type d_out_tensor_stride(out_tensor_stride); index_type unused_key = std::numeric_limits<index_type>::max(); // (THREAD * D + D) * 4 uint32_t const shared_memory_size_in_bytes = m_coordinate_size * sizeof(index_type) + // stride thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp size_type const num_threads = in_size; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("num block", num_blocks); LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("threads dim", thread_dim); LOG_DEBUG("num threads", num_threads); index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate( 2 * (in_size + 1) * sizeof(index_type)); index_type *ins = in_out_map; index_type *outs = in_out_map + in_size + 1; // for __restrict__ collision prevention LOG_DEBUG("Allocated temporary memory"); detail::stride_map_kernel<coordinate_type, size_type, index_type, map_type> <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>( *m_map, // *out_map.m_map, // m_valid_map_index.cbegin(), // num_threads, // d_out_tensor_stride.cbegin(), // ins, // outs, // m_coordinate_size, // unused_key); auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs)); auto const valid_size = thrust::remove_if(thrust::device, begin, thrust::make_zip_iterator( thrust::make_tuple(ins + in_size, outs + in_size)), detail::is_first<index_type>(unused_key)) - begin; LOG_DEBUG("Valid size:", valid_size); kernel_map_type kernel_map(valid_size, 
base_type::m_byte_allocator, false); CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), ins, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), outs, valid_size * sizeof(index_type), cudaMemcpyDeviceToDevice)); base_type::m_byte_allocator.deallocate( (char *)in_out_map, 2 * (in_size + 1) * sizeof(index_type)); return kernel_map; } namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename map_type> __global__ void origin_map_kernel(map_type const __restrict__ in_map, // map_type const __restrict__ origin_map, // index_type const *const __restrict__ in_valid_map_index, // size_type const num_threads, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, index_type *__restrict__ p_kernels, size_type const coordinate_size) { extern __shared__ coordinate_type sh_all[]; auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; // clang-format off coordinate_type *sh_tmp = sh_all + tx * coordinate_size; // clang-format on if (x < num_threads) for (index_type i = 0; i < coordinate_size; ++i) sh_tmp[i] = 0; __syncthreads(); if (x < num_threads) { typename map_type::value_type const &in_value = in_map.data()[in_valid_map_index[x]]; sh_tmp[0] = in_value.first[0]; auto origin_iter = origin_map.find(coordinate<coordinate_type>(sh_tmp)); p_in_maps[x] = in_value.second; p_out_maps[x] = origin_iter->second; // origin_map row index // For kernel_map decompose() p_kernels[x] = origin_iter->second; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin_map( self_type const &origin_map, uint32_t thread_dim) const { ASSERT(std::all_of(origin_map.get_tensor_stride().begin(), origin_map.get_tensor_stride().end(), [](auto const &i) 
{ return i == 0; }), "Invalid origin tensor stride", origin_map.get_tensor_stride()); // reserve size(); size_type const in_size = size(); LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size()); // (THREAD * D) * 4 uint32_t const shared_memory_size_in_bytes = thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp size_type const num_threads = in_size; auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); LOG_DEBUG("origin_map num block", num_blocks); LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes); LOG_DEBUG("origin_map threads dim", thread_dim); LOG_DEBUG("origin_map num threads", num_threads); kernel_map_type kernel_map(in_size, base_type::m_byte_allocator); CUDA_CHECK(cudaStreamSynchronize(0)); LOG_DEBUG("Allocated kernel_map."); detail::origin_map_kernel<coordinate_type, size_type, index_type, map_type> <<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>( *m_map, // *origin_map.m_map, // m_valid_map_index.cbegin(), // num_threads, // kernel_map.in_maps.begin(), // kernel_map.out_maps.begin(), // kernel_map.kernels.begin(), // m_coordinate_size); CUDA_CHECK(cudaStreamSynchronize(0)); kernel_map.decompose(); LOG_DEBUG("origin map decomposed"); return kernel_map; } namespace detail { template <typename coordinate_type, typename index_type, // typename stride_type, // typename float_type, // typename map_type> __global__ void interpolation_kernel(map_type __restrict__ in_map, // index_type const num_threads, // float_type const *__restrict__ p_tfield, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, // float_type *__restrict__ p_weights, // stride_type const *__restrict__ p_tensor_stride, // index_type const unused_map_value, index_type const coordinate_size, index_type const neighbor_volume) { // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type) // + THREADS * coordinate_size * sizeof(coordinate_type) SharedMemory<float_type> shared; float_type 
*sh_all = shared.getPointer(); auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; float_type *sh_tfield = sh_all + tx * coordinate_size; coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>( sh_all + CUDA_NUM_THREADS * coordinate_size); coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size; index_type *sh_tensor_stride = reinterpret_cast<index_type *>( sh_coordinate + CUDA_NUM_THREADS * coordinate_size); auto const equal = in_map.get_key_equal(); for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = p_tensor_stride[i]; } if (x < num_threads) { index_type const offset = coordinate_size * (x / neighbor_volume); for (index_type i = 0; i < coordinate_size; ++i) { sh_tfield[i] = p_tfield[offset + i]; } } __syncthreads(); if (x < num_threads) { // iterate over values uint32_t neighbor_ind = x % neighbor_volume; // batch index sh_tmp[0] = lrint(sh_tfield[0]); uint32_t mask = 1; for (uint32_t j = coordinate_size - 1; j > 0; --j) { index_type curr_tensor_stride = sh_tensor_stride[j - 1]; if ((neighbor_ind & mask) == 0) sh_tmp[j] = floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride; else sh_tmp[j] = floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride + curr_tensor_stride; mask = mask << 1; } auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp)); if (in_result != in_map.end()) { p_in_maps[x] = (*in_result).second; p_out_maps[x] = x / neighbor_volume; // Compute weight float_type weight = 1; for (uint32_t j = 1; j < coordinate_size; ++j) { weight *= 1 - abs(sh_tfield[j] - sh_tmp[j]) / sh_tensor_stride[j - 1]; } p_weights[x] = weight; } else { p_in_maps[x] = unused_map_value; } } } template <typename coordinate_type, typename index_type, // typename stride_type, // typename float_type, // typename map_type> __global__ void field_map_kernel(map_type __restrict__ in_map, // index_type const num_threads, // float_type const *__restrict__ 
p_tfield, // index_type *__restrict__ p_in_maps, // index_type *__restrict__ p_out_maps, // stride_type const *__restrict__ p_tensor_stride, // index_type const unused_map_value, index_type const coordinate_size) { // coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type) // + THREADS * coordinate_size * sizeof(coordinate_type) SharedMemory<float_type> shared; float_type *sh_all = shared.getPointer(); auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_all); coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size; index_type *sh_tensor_stride = reinterpret_cast<index_type *>( sh_coordinate + CUDA_NUM_THREADS * coordinate_size); auto const equal = in_map.get_key_equal(); for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) { sh_tensor_stride[i] = p_tensor_stride[i]; } __syncthreads(); index_type const offset = coordinate_size * x; if (x < num_threads) { // iterate over values float_type const *curr_tfield = p_tfield + offset; // batch index sh_tmp[0] = lrint(curr_tfield[0]); for (uint32_t j = coordinate_size - 1; j > 0; --j) { index_type curr_tensor_stride = sh_tensor_stride[j - 1]; sh_tmp[j] = floor(curr_tfield[j] / curr_tensor_stride) * curr_tensor_stride; } auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp)); if (in_result != in_map.end()) { p_in_maps[x] = (*in_result).second; p_out_maps[x] = x; } else { p_in_maps[x] = unused_map_value; } } } // interpolation map inst template <typename coordinate_type, typename index_type, typename size_type, typename stride_type, typename field_type, typename map_type, typename ByteAllocatorType> std::vector<at::Tensor> interpolation_map_weight_tfield_type( uint32_t const num_tfield, // uint32_t const coordinate_size, // index_type const unused_key, // field_type const *const p_tfield, // map_type &map, // stride_type const *const p_tensor_stride, // 
ByteAllocatorType const &byte_allocator, c10::TensorOptions tfield_options) { uint32_t const neighbor_volume = std::pow(2, (coordinate_size - 1)); size_type num_threads = neighbor_volume * num_tfield; LOG_DEBUG("neighbor_volume:", neighbor_volume, "num_tfield:", num_tfield, "num_threads:", num_threads); index_type *d_in_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); index_type *d_out_map = reinterpret_cast<index_type *>( byte_allocator.allocate(num_threads * sizeof(index_type))); field_type *d_weight = reinterpret_cast<field_type *>( byte_allocator.allocate(num_threads * sizeof(field_type))); size_type shared_memory_size_in_bytes = coordinate_size * CUDA_NUM_THREADS * sizeof(field_type) + coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) + coordinate_size * sizeof(index_type); LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes); interpolation_kernel<coordinate_type, index_type, stride_type, field_type, map_type> <<<GET_BLOCKS(num_threads, CUDA_NUM_THREADS), CUDA_NUM_THREADS, shared_memory_size_in_bytes>>>(map, // num_threads, // p_tfield, // d_in_map, // d_out_map, // d_weight, // p_tensor_stride, // unused_key, // coordinate_size, // neighbor_volume); // remove unused_keys auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(d_in_map, // d_out_map, d_weight)); size_type const number_of_valid = thrust::remove_if(thrust::device, // valid_begin, // thrust::make_zip_iterator(thrust::make_tuple( d_in_map + num_threads, // d_out_map + num_threads, d_weight + num_threads)), detail::is_first<index_type>(unused_key)) - valid_begin; LOG_DEBUG("number_of_valid:", number_of_valid); auto final_in_map = torch::empty({number_of_valid}, tfield_options.dtype(torch::kInt32).requires_grad(false)); auto final_out_map = torch::empty({number_of_valid}, tfield_options.dtype(torch::kInt32).requires_grad(false)); auto final_weights = torch::empty({number_of_valid}, tfield_options.requires_grad(false)); if 
(number_of_valid > 0) {
    // d_in_map / d_out_map / d_weight were allocated from the device byte
    // allocator and the destination tensors are CUDA tensors, so these are
    // device-to-device copies. Was cudaMemcpyHostToDevice, which mislabels
    // the source memory space; the analogous compaction copies in
    // kernel_map() above already use cudaMemcpyDeviceToDevice.
    CUDA_CHECK(cudaMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
                          number_of_valid * sizeof(int32_t),
                          cudaMemcpyDeviceToDevice));
    CUDA_CHECK(cudaMemcpy(final_out_map.template data_ptr<int32_t>(),
                          d_out_map, number_of_valid * sizeof(int32_t),
                          cudaMemcpyDeviceToDevice));
    CUDA_CHECK(cudaMemcpy(final_weights.template data_ptr<field_type>(),
                          d_weight, number_of_valid * sizeof(field_type),
                          cudaMemcpyDeviceToDevice));
  }

  // Return the temporary device buffers to the caller-provided allocator.
  byte_allocator.deallocate((char *)d_in_map,
                            num_threads * sizeof(index_type));
  byte_allocator.deallocate((char *)d_out_map,
                            num_threads * sizeof(index_type));
  byte_allocator.deallocate((char *)d_weight,
                            num_threads * sizeof(field_type));
  return {final_in_map, final_out_map, final_weights};
}

// field map inst
//
// For each of the num_tfield continuous field coordinates, snap it onto the
// tensor-stride grid, look the discrete coordinate up in `map`, and emit the
// (in row, field row) pair. Misses are tagged with unused_key and compacted
// away on the host side (see the thrust::remove_if in the caller's tail).
template <typename coordinate_type, typename index_type, typename size_type,
          typename stride_type, typename field_type, typename map_type,
          typename ByteAllocatorType>
std::pair<at::Tensor, at::Tensor>
field_map_type(uint32_t const num_tfield,                //
               uint32_t const coordinate_size,           //
               index_type const unused_key,              //
               field_type const *const p_tfield,         //
               map_type &map,                            //
               stride_type const *const p_tensor_stride, //
               ByteAllocatorType const &byte_allocator) {
  size_type num_threads = num_tfield;
  LOG_DEBUG("num_threads:", num_threads);

  // Temporary device buffers, one slot per field point.
  index_type *d_in_map = reinterpret_cast<index_type *>(
      byte_allocator.allocate(num_threads * sizeof(index_type)));
  index_type *d_out_map = reinterpret_cast<index_type *>(
      byte_allocator.allocate(num_threads * sizeof(index_type)));

  // Per-thread scratch coordinate + one copy of the tensor stride.
  size_type shared_memory_size_in_bytes =
      coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) +
      coordinate_size * sizeof(index_type);
  LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes);
  field_map_kernel<coordinate_type, index_type, stride_type, field_type,
                   map_type>
      <<<GET_BLOCKS(num_threads, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
         shared_memory_size_in_bytes>>>(map,             //
                                        num_threads,     //
                                        p_tfield,        //
                                        d_in_map,        //
                                        d_out_map,       //
                                        p_tensor_stride, //
unused_key, //
                                        coordinate_size);

  // Compact the (in, out) pairs: drop every row whose in-map lookup failed
  // (the kernel marked those with unused_key in d_in_map).
  auto valid_begin =
      thrust::make_zip_iterator(thrust::make_tuple(d_in_map, d_out_map));
  size_type const number_of_valid =
      thrust::remove_if(thrust::device, //
                        valid_begin,    //
                        thrust::make_zip_iterator(
                            thrust::make_tuple(d_in_map + num_threads, //
                                               d_out_map + num_threads)),
                        detail::is_first<index_type>(unused_key)) -
      valid_begin;
  LOG_DEBUG("number_of_valid:", number_of_valid);

  auto curr_device = at::cuda::current_device();
  auto tfield_options = torch::TensorOptions({at::kCUDA, curr_device})
                            .dtype(torch::kInt32)
                            .requires_grad(false);
  auto final_in_map = torch::empty({number_of_valid}, tfield_options);
  auto final_out_map = torch::empty({number_of_valid}, tfield_options);

  if (number_of_valid > 0) {
    // Device-to-device copies: both the temporary buffers and the output
    // tensors live on the GPU. Was cudaMemcpyHostToDevice, which mislabels
    // the source memory space (cf. the DeviceToDevice copies in
    // kernel_map()/stride_map() above).
    CUDA_CHECK(cudaMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
                          number_of_valid * sizeof(int32_t),
                          cudaMemcpyDeviceToDevice));
    CUDA_CHECK(cudaMemcpy(final_out_map.template data_ptr<int32_t>(),
                          d_out_map, number_of_valid * sizeof(int32_t),
                          cudaMemcpyDeviceToDevice));
  }

  byte_allocator.deallocate((char *)d_in_map,
                            num_threads * sizeof(index_type));
  byte_allocator.deallocate((char *)d_out_map,
                            num_threads * sizeof(index_type));

  return {final_in_map, final_out_map};
}

} // namespace detail

// Build the trilinear-interpolation map and weights for a continuous field:
// for every field coordinate, the 2^(D-1) surrounding grid neighbors are
// probed and weighted by their distance (dispatches on tfield's dtype).
template <typename coordinate_type,
          template <typename T> class TemplatedAllocator>
std::vector<at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::interpolation_map_weight(
    at::Tensor const &tfield) const {
  // Over estimate the reserve size to be size();
  ASSERT(tfield.dim() == 2, "Invalid tfield dimension");
  ASSERT(tfield.size(1) == m_coordinate_size, "Invalid tfield size");
  size_type const num_tfield = tfield.size(0);
  uint32_t const neighbor_volume = std::pow(2, (m_coordinate_size - 1));
  index_type const unused_key = std::numeric_limits<index_type>::max();
  LOG_DEBUG("map size", m_size);

  switch (tfield.scalar_type()) {
  case at::ScalarType::Double:
    return detail::interpolation_map_weight_tfield_type<
        coordinate_type,
index_type, size_type, index_type, double, map_type, TemplatedAllocator<char>>(num_tfield, // m_coordinate_size, // unused_key, // tfield.template data_ptr<double>(), // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator, // tfield.options()); case at::ScalarType::Float: return detail::interpolation_map_weight_tfield_type< coordinate_type, index_type, size_type, index_type, float, map_type, TemplatedAllocator<char>>(num_tfield, // m_coordinate_size, // unused_key, // tfield.template data_ptr<float>(), // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator, // tfield.options()); default: ASSERT(false, "Unsupported float type"); } } template <typename coordinate_type, template <typename T> class TemplatedAllocator> template <typename coordinate_field_type> std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::field_map( coordinate_field_type const *p_tfield, size_type const num_tfield) const { index_type const unused_key = std::numeric_limits<index_type>::max(); LOG_DEBUG("map size", m_size); return detail::field_map_type<coordinate_type, index_type, size_type, index_type, coordinate_field_type, map_type, TemplatedAllocator<char>>( num_tfield, // m_coordinate_size, // unused_key, // p_tfield, // *m_map, // m_device_tensor_stride.cbegin(), // m_byte_allocator); } /** * Union map */ namespace detail { template <typename coordinate_type, // typename size_type, // typename index_type, // typename tensor_type, // typename map_type> __global__ void union_map_kernel(size_type const num_threads, // map_type const __restrict__ in_map, // map_type const __restrict__ union_map, // index_type const *const __restrict__ in_valid_map_index, // tensor_type *__restrict__ p_in_maps, // tensor_type *__restrict__ p_union_maps, size_type const coordinate_size) { auto const tx = threadIdx.x; auto const bx = blockIdx.x; auto const x = blockDim.x * bx + tx; if (x < num_threads) { typename map_type::value_type const &in_value = 
in_map.data()[in_valid_map_index[x]]; auto union_iter = union_map.find(in_value.first); p_in_maps[x] = in_value.second; p_union_maps[x] = union_iter->second; } } } // namespace detail template <typename coordinate_type, template <typename T> class TemplatedAllocator> std::vector<at::Tensor> CoordinateMapGPU<coordinate_type, TemplatedAllocator>::union_map( std::vector<std::reference_wrapper<self_type>> const &in_maps, uint32_t thread_dim) const { auto options = torch::TensorOptions({at::kCUDA, at::cuda::current_device()}) .dtype(torch::kInt64) .requires_grad(false); std::vector<at::Tensor> union_maps; for (self_type const &in_map : in_maps) { size_type const num_threads = in_map.m_valid_map_index.size(); auto const num_blocks = GET_BLOCKS(num_threads, thread_dim); at::Tensor curr_map = torch::empty({2, num_threads}, options); LOG_DEBUG("in_map size", num_threads, ", num block", num_blocks, ", threads dim", thread_dim); int64_t *d_in_map = curr_map.template data_ptr<int64_t>(); detail::union_map_kernel<coordinate_type, size_type, index_type, int64_t, map_type> <<<num_blocks, thread_dim>>>(num_threads, // *in_map.m_map, // *m_map, // in_map.m_valid_map_index.cbegin(), // d_in_map, // d_in_map + num_threads, // m_coordinate_size); CUDA_CHECK(cudaStreamSynchronize(0)); union_maps.push_back(std::move(curr_map)); } return union_maps; } // Helper functions template <typename coordinate_type, template <typename T> class TemplatedAllocator> void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::copy_coordinates( coordinate_type *dst_coordinate) const { size_type const num_threads = size(); if (num_threads <= 0) return; // Copy by offset // size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS); // detail::copy_coordinates_by_offset<coordinate_type, size_type, index_type, // map_type> // <<<num_blocks, CUDA_NUM_THREADS>>>( // *m_map, // // dst_coordinate, // // m_valid_map_index.data(), // // num_threads, // // m_coordinate_size); size_type const 
num_blocks = GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS); detail::copy_coordinates_by_valid_row<coordinate_type, size_type, index_type, map_type> <<<num_blocks, CUDA_NUM_THREADS>>>( // *m_map, // const_coordinate_data(), // dst_coordinate, // m_valid_row_index.cbegin(), // num_threads * m_coordinate_size, // m_coordinate_size); } // Template instantiation template class CoordinateFieldMapGPU<default_types::ccoordinate_type, default_types::dcoordinate_type, detail::default_allocator>; template class CoordinateFieldMapGPU<default_types::ccoordinate_type, default_types::dcoordinate_type, detail::c10_allocator>; template class CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>; template class CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>; template std::pair< gpu_storage<default_types::index_type, detail::default_allocator<char>>, gpu_storage<default_types::index_type, detail::default_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: insert_and_map<true>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair< gpu_storage<default_types::index_type, detail::default_allocator<char>>, gpu_storage<default_types::index_type, detail::default_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: insert_and_map<false>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair< gpu_storage<default_types::index_type, detail::c10_allocator<char>>, gpu_storage<default_types::index_type, detail::c10_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: insert_and_map<true>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template 
std::pair< gpu_storage<default_types::index_type, detail::c10_allocator<char>>, gpu_storage<default_types::index_type, detail::c10_allocator<char>>> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: insert_and_map<false>( coordinate_iterator<default_types::dcoordinate_type> key_first, coordinate_iterator<default_types::dcoordinate_type> key_last); template std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>:: field_map<float>(float const *p_tfield, default_types::size_type const num_tfield) const; template std::pair<at::Tensor, at::Tensor> CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>:: field_map<float>(float const *p_tfield, default_types::size_type const num_tfield) const; } // namespace minkowski
73215208a6a43fdc86b6c72472b4675168a4c3be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../src/PermutohedralLatticeGPU.cuh" #include "../src/DeviceMemoryAllocator.h" //input and positions should be device pointers by this point void lattice_filter_gpu(float * output, const float *input, const float *positions, int pd, int vd, int n) { auto allocator = DeviceMemoryAllocator(); //vd = image_channels + 1 if(pd == 5 && vd == 4){ auto lattice = PermutohedralLatticeGPU<float, 5, 4>(n, &allocator); lattice.filter(output, input, positions, false); } else throw 1; } void compute_bilateral_kernel_gpu(const float * reference, float * positions, int num_super_pixels, int n_reference_channels, int n_spatial_dims, const int *spatial_dims, float theta_alpha, float theta_beta){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( compute_kernel<float>), dim3(blocks), dim3(blockSize), 0, 0, reference, positions, num_super_pixels, n_reference_channels, n_spatial_dims, spatial_dims, theta_alpha, theta_beta); }; void compute_spatial_kernel_gpu(float * positions, int num_super_pixels, int n_spatial_dims, const int *spatial_dims, float theta_gamma){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( compute_kernel<float>), dim3(blocks), dim3(blockSize), 0, 0, nullptr, positions, num_super_pixels, 0, n_spatial_dims, spatial_dims, theta_gamma, 0); }; //same stuff for double void lattice_filter_gpu(double * output, const double *input, const double *positions, int pd, int vd, int n) { auto allocator = DeviceMemoryAllocator(); //vd = image_channels + 1 if(pd == 5 && vd == 4){ auto lattice = PermutohedralLatticeGPU<double, 5, 4>(n, &allocator); lattice.filter(output, input, positions, false); } else throw 1; } void compute_bilateral_kernel_gpu(const double * reference, double * positions, int num_super_pixels, int n_reference_channels, int n_spatial_dims, 
const int *spatial_dims, double theta_alpha, double theta_beta){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( compute_kernel<double>), dim3(blocks), dim3(blockSize), 0, 0, reference, positions, num_super_pixels, n_reference_channels, n_spatial_dims, spatial_dims, theta_alpha, theta_beta); }; void compute_spatial_kernel_gpu(double * positions, int num_super_pixels, int n_spatial_dims, const int *spatial_dims, double theta_gamma){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( compute_kernel<double>), dim3(blocks), dim3(blockSize), 0, 0, nullptr, positions, num_super_pixels, 0, n_spatial_dims, spatial_dims, theta_gamma, 0); };
73215208a6a43fdc86b6c72472b4675168a4c3be.cu
#include "cuda_runtime.h" #include "../src/PermutohedralLatticeGPU.cuh" #include "../src/DeviceMemoryAllocator.h" //input and positions should be device pointers by this point void lattice_filter_gpu(float * output, const float *input, const float *positions, int pd, int vd, int n) { auto allocator = DeviceMemoryAllocator(); //vd = image_channels + 1 if(pd == 5 && vd == 4){ auto lattice = PermutohedralLatticeGPU<float, 5, 4>(n, &allocator); lattice.filter(output, input, positions, false); } else throw 1; } void compute_bilateral_kernel_gpu(const float * reference, float * positions, int num_super_pixels, int n_reference_channels, int n_spatial_dims, const int *spatial_dims, float theta_alpha, float theta_beta){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); compute_kernel<float><<<blocks, blockSize>>>(reference, positions, num_super_pixels, n_reference_channels, n_spatial_dims, spatial_dims, theta_alpha, theta_beta); }; void compute_spatial_kernel_gpu(float * positions, int num_super_pixels, int n_spatial_dims, const int *spatial_dims, float theta_gamma){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); compute_kernel<float><<<blocks, blockSize>>>(nullptr, positions, num_super_pixels, 0, n_spatial_dims, spatial_dims, theta_gamma, 0); }; //same stuff for double void lattice_filter_gpu(double * output, const double *input, const double *positions, int pd, int vd, int n) { auto allocator = DeviceMemoryAllocator(); //vd = image_channels + 1 if(pd == 5 && vd == 4){ auto lattice = PermutohedralLatticeGPU<double, 5, 4>(n, &allocator); lattice.filter(output, input, positions, false); } else throw 1; } void compute_bilateral_kernel_gpu(const double * reference, double * positions, int num_super_pixels, int n_reference_channels, int n_spatial_dims, const int *spatial_dims, double theta_alpha, double theta_beta){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 
blockSize(BLOCK_SIZE, 1, 1); compute_kernel<double><<<blocks, blockSize>>>(reference, positions, num_super_pixels, n_reference_channels, n_spatial_dims, spatial_dims, theta_alpha, theta_beta); }; void compute_spatial_kernel_gpu(double * positions, int num_super_pixels, int n_spatial_dims, const int *spatial_dims, double theta_gamma){ dim3 blocks((num_super_pixels - 1) / BLOCK_SIZE + 1, 1, 1); dim3 blockSize(BLOCK_SIZE, 1, 1); compute_kernel<double><<<blocks, blockSize>>>(nullptr, positions, num_super_pixels, 0, n_spatial_dims, spatial_dims, theta_gamma, 0); };
971aa6207502cdca57df02f67bef75674e2f35e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "DataTypes.h" #include "Constants.h" #include "Helpers.h" __global__ void VecAdd() { } int main() { hipError_t err = hipSuccess; Circle *circles = (Circle*)malloc(sizeof(Circle) * OBJECT_COUNT); circles[0].center.x = 0; circles[0].center.y = 0; circles[0].radius = CELLSIZE / 4; circles[1].center.x = 0; circles[1].center.y = 1; circles[1].radius = CELLSIZE / 4; circles[2].center.x = 1; circles[2].center.y = 0; circles[2].radius = CELLSIZE / 4; circles[3].center.x = 1; circles[3].center.y = 1; circles[3].radius = CELLSIZE / 4; CellIdItem *cellIds = (CellIdItem*)malloc(sizeof(CellIdItem) * MAX_ITEMS); ControlBitsItem *controlBits = (ControlBitsItem*)malloc(sizeof(ControlBitsItem) * MAX_ITEMS); for (int i = 0; i < OBJECT_COUNT; i++) { cellIds[i].Cells[0] = posToHash(circles[i].center); controlBits[i].HCellType = posToCellType(circles[i].center); glm::uvec2 coords = posToCoords(circles[i].center); int collisionCount = 0; for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { if (x == 0 && y == 0) { continue; } GLuint currentX = coords.x + x; GLuint currentY = coords.y + y; if (collides(circles[i], coordsToGridBox(currentX, currentY))) { cellIds[i].Cells[collisionCount + 1] = posToHash(circles[i].center + glm::vec2(x * CELLSIZE, y * CELLSIZE)); collisionCount++; } } } for (int u = collisionCount + 1; u < MAX_OBJECT_INTERSECTIONS; u++) { cellIds[i].Cells[u] = 0xffffffff; } } for (int i = 0; i < OBJECT_COUNT; i++) { printf("cellIds[%d] HomeCell = %d\n", i, cellIds[i].Cells[0]); printf("cellIds[%d] HomeCellType = %d\n", i, controlBits[i].HCellType); for (int u = 0; u < MAX_OBJECT_INTERSECTIONS; u++) { printf("cellIds[%d] Cells [%d] = %d\n", i, u, cellIds[i].Cells[u]); } printf("----\n"); } return err; }
971aa6207502cdca57df02f67bef75674e2f35e6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "DataTypes.h" #include "Constants.h" #include "Helpers.h" __global__ void VecAdd() { } int main() { cudaError_t err = cudaSuccess; Circle *circles = (Circle*)malloc(sizeof(Circle) * OBJECT_COUNT); circles[0].center.x = 0; circles[0].center.y = 0; circles[0].radius = CELLSIZE / 4; circles[1].center.x = 0; circles[1].center.y = 1; circles[1].radius = CELLSIZE / 4; circles[2].center.x = 1; circles[2].center.y = 0; circles[2].radius = CELLSIZE / 4; circles[3].center.x = 1; circles[3].center.y = 1; circles[3].radius = CELLSIZE / 4; CellIdItem *cellIds = (CellIdItem*)malloc(sizeof(CellIdItem) * MAX_ITEMS); ControlBitsItem *controlBits = (ControlBitsItem*)malloc(sizeof(ControlBitsItem) * MAX_ITEMS); for (int i = 0; i < OBJECT_COUNT; i++) { cellIds[i].Cells[0] = posToHash(circles[i].center); controlBits[i].HCellType = posToCellType(circles[i].center); glm::uvec2 coords = posToCoords(circles[i].center); int collisionCount = 0; for (int x = -1; x <= 1; x++) { for (int y = -1; y <= 1; y++) { if (x == 0 && y == 0) { continue; } GLuint currentX = coords.x + x; GLuint currentY = coords.y + y; if (collides(circles[i], coordsToGridBox(currentX, currentY))) { cellIds[i].Cells[collisionCount + 1] = posToHash(circles[i].center + glm::vec2(x * CELLSIZE, y * CELLSIZE)); collisionCount++; } } } for (int u = collisionCount + 1; u < MAX_OBJECT_INTERSECTIONS; u++) { cellIds[i].Cells[u] = 0xffffffff; } } for (int i = 0; i < OBJECT_COUNT; i++) { printf("cellIds[%d] HomeCell = %d\n", i, cellIds[i].Cells[0]); printf("cellIds[%d] HomeCellType = %d\n", i, controlBits[i].HCellType); for (int u = 0; u < MAX_OBJECT_INTERSECTIONS; u++) { printf("cellIds[%d] Cells [%d] = %d\n", i, u, cellIds[i].Cells[u]); } printf("----\n"); } return err; }
3355fad310a36941410dd94c04ae071d3768cc5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GistCudaSetup.cuh" #include "GistCudaCalc.cuh" #include "EntropyCalculator.cuh" #include <iostream> /** * Allocate memory on the GPU. * @param array: The pointer to the array, which will be allocated on the GPU. * @param size: An integer giving the size of the array, which will be allocated. * @throws: CudaException if a problem occurs. */ __host__ void allocateCuda_GIGIST(void **array, int size) { // Check if the array is actually free, if not, it will be freed // (fun fact: checking is not necessary, one could also simply free the memory). if ((*array) != NULL) { hipFree(*array); } // If something goes wrong, throw exception if (hipMalloc(array, size) != hipSuccess) { throw CudaException(); } } /** * Copy memory from the CPU to the GPU. * @param array: The array from which the values shall be copied. * @param array_c: The array on the device, to which the values shall be copied. * @param size: The size of the stuff which will be copied. * @throws: CudaException if something goes wrong. */ __host__ void copyMemoryToDevice_GIGIST(void *array, void *array_c, int size) { // If something goes wrong, throw exception // In this case only copying can go wrong. if (hipMemcpy(array_c, array, size, hipMemcpyHostToDevice) != hipSuccess) { throw CudaException(); } } /** * A simple helper function that copies a lot of stuff to the GPU (as structs). * @param charge: An array holding the charges for the different atoms. * @param atomtype: An array holding the integers for the atom types of the different atoms. * @param solvent: An array of boolean values, holding the information whether a certain atom is solvent or solute. * @param atomNumber: The total number of atoms. * @param atomProps_c: A pointer to an array on the GPU, which will hold the atom properties. * @param ljA: An array holding the lennard-jones parameter A for each atom type pair. 
* @param ljB: An array holding the lennard-jones parameter B for each atom type pair. * @param length: The length of the two aforementioned arrays (ljA & ljB). * @param lJparams_c: A pointer to an array on the GPU, which will hold the lj parameters. * @throws: CudaException if something bad happens. */ __host__ void copyMemoryToDeviceStruct_GIGIST(float *charge, int *atomtype, bool *solvent, int *molecule, int atomNumber, void **atomProps_c, float *ljA, float *ljB, int length, void **lJparams_c) { // Check if the two arrays are free. Again, this could be removed (but will stay!) if ((*atomProps_c) != NULL) { hipFree(*atomProps_c); } if ((*lJparams_c) != NULL) { hipFree(*lJparams_c); } // Allocate the necessary memory on the GPU. if (hipMalloc(atomProps_c, atomNumber * sizeof(AtomProperties)) != hipSuccess) { throw CudaException(); } if (hipMalloc(lJparams_c, length * sizeof(ParamsLJ)) != hipSuccess) { throw CudaException(); } // Create an array for the lennard-jones parameters. ParamsLJ *ljp = (ParamsLJ *) malloc (length * sizeof(ParamsLJ)); // Add the lennard-jones parameters to the array. for (int i = 0; i < length; ++i) { ljp[i] = ParamsLJ(ljA[i], ljB[i]); } // Create an array for the atom properties. AtomProperties *array = (AtomProperties *)malloc(atomNumber * sizeof(AtomProperties)); // Add the properties into the array. for (int i = 0; i < atomNumber; ++i) { array[i] = AtomProperties(charge[i], atomtype[i], solvent[i], molecule[i]); } // Copy the memory from the host to the device. if (hipMemcpy((*atomProps_c), array, atomNumber * sizeof(AtomProperties), hipMemcpyHostToDevice) != hipSuccess) { throw CudaException(); } if (hipMemcpy((*lJparams_c), ljp, length * sizeof(ParamsLJ), hipMemcpyHostToDevice) != hipSuccess) { throw CudaException(); } // Free the two arrays (so that no memory leak occurs). free(ljp); free(array); } /** * Free an array. * @param array: The array you want to free. 
*/ __host__ void freeCuda_GIGIST(void *array) { hipFree(array); } // This is coded C-like, but uses exceptions. /** * This starts the cuda kernel, thus it is actually a quite long function. */ __host__ EnergyReturn doActionCudaEnergy_GIGIST(const double *coords, int *NBindex_c, int ntypes, void *parameter, void *molecule_c, int boxinfo, float *recip_o_box, float *ucell, int maxAtoms, int headAtomType, float neighbourCut2, int *result_o, int *result_n, float *result_w_c, float *result_s_c, int *result_O_c, int *result_N_c, bool doorder) { Coordinates_GPU *coords_c = NULL; float *recip_b_c = NULL; float *ucell_c = NULL; float *result_A = (float *) calloc(maxAtoms, sizeof(float)); float *result_s = (float *) calloc(maxAtoms, sizeof(float)); Coordinates_GPU *coord_array = (Coordinates_GPU *) calloc(maxAtoms, sizeof(Coordinates_GPU)); // Casting AtomProperties *sender = (AtomProperties *) molecule_c; ParamsLJ *lennardJonesParams = (ParamsLJ *) parameter; // Create Boxinfo and Unit cell. This is actually very important for the speed (otherwise // there would be LOTS of access to non-local variables). BoxInfo boxinf; if (boxinfo != 0) { boxinf = BoxInfo(recip_o_box, boxinfo); } UnitCell ucellN; if (boxinfo == 2) { ucellN = UnitCell(ucell); } // Add the coordinates to the array. for (int i = 0; i < maxAtoms; ++i) { coord_array[i] = Coordinates_GPU(&coords[i * 3]); } // vectors that will return the necessary information. 
std::vector<float> result_esw; std::vector<float> result_eww; // Allocate space on the GPU if (hipMalloc(&coords_c, maxAtoms * sizeof(Coordinates_GPU)) != hipSuccess) { free(result_A); free(result_s); free(coord_array); throw CudaException(); } // Copy the data to the GPU if (hipMemcpy(coords_c, coord_array, maxAtoms * sizeof(Coordinates_GPU), hipMemcpyHostToDevice) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (hipMemcpy(result_w_c, result_A, maxAtoms * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (hipMemcpy(result_s_c, result_s, maxAtoms * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } // If the doorder calculation is used, it needs to calculate everything differently, so the slow version is used // (this is about 10% slower). if (doorder) { hipLaunchKernelGGL(( cudaCalcEnergySlow_GIGIST), dim3((maxAtoms + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE), dim3(SLOW_BLOCKSIZE) , 0, 0, coords_c, NBindex_c, ntypes, lennardJonesParams, sender, boxinf, ucellN, maxAtoms, result_w_c, result_s_c, nullptr, nullptr, headAtomType, neighbourCut2, result_O_c, result_N_c); } else { // Uses a 2D array, which is nice for memory access. 
dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE); dim3 numBlocks((maxAtoms + threadsPerBlock.x) / threadsPerBlock.x, (maxAtoms + threadsPerBlock.y) / threadsPerBlock.y); // The actual call of the device function hipLaunchKernelGGL(( cudaCalcEnergy_GIGIST), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, coords_c, NBindex_c, ntypes, lennardJonesParams, sender, boxinf, ucellN, maxAtoms, result_w_c, result_s_c, nullptr, nullptr, headAtomType, neighbourCut2, result_O_c, result_N_c); // Check if there was an error. hipError_t hipError_t = hipGetLastError(); if (hipError_t != hipSuccess) { printf("returned %s\n", hipGetErrorString(hipError_t)); } } // Return the results of the calculation to the main memory if (hipMemcpy(result_A, result_w_c, maxAtoms * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (hipMemcpy(result_s, result_s_c, maxAtoms * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (hipMemcpy(result_o, result_O_c, maxAtoms * 4 * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (hipMemcpy(result_n, result_N_c, maxAtoms * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess) { hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } for (int i = 0; i < maxAtoms; ++i) { result_eww.push_back(result_A[i]); result_esw.push_back(result_s[i]); } // Free everything used in here. 
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c); free(result_A); free(result_s); free(coord_array); return {result_eww, result_esw}; } #ifdef DEBUG_GIST_CUDA // Not necessary __host__ std::vector<Quaternion<float> > shoveQuaternionsTest_GIGIST(std::vector<Quaternion<float> > quats) { QuaternionG<float> *quats_c = NULL; float *ret_c = NULL; std::vector<Quaternion<float> > ret; float *ret_f = new float[quats.size() * 4]; QuaternionG<float> *quats_f = new QuaternionG<float>[quats.size()]; for (int i = 0; i < quats.size(); ++i) { quats_f[i] = quats.at(i); } if (hipMalloc(&quats_c, quats.size() * sizeof(QuaternionG<float>)) != hipSuccess) { delete quats_f; delete ret_f; throw CudaException(); } if (hipMalloc(&ret_c, quats.size() * 4 * sizeof(float)) != hipSuccess) { hipFree(quats_c); delete quats_f; delete ret_f; throw CudaException(); } if (hipMemcpy(quats_c, quats_f, quats.size() * sizeof(QuaternionG<float>), hipMemcpyHostToDevice) != hipSuccess) { hipFree(quats_c); hipFree(ret_c); delete quats_f; delete ret_f; throw CudaException(); } hipLaunchKernelGGL(( shoveQuaternions_GIGIST), dim3((quats.size() + BLOCKSIZE) / BLOCKSIZE), dim3(BLOCKSIZE) , 0, 0, quats_c, quats.size(), ret_c); if (hipMemcpy(ret_f, ret_c, quats.size() * 4 * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { hipFree(quats_c); hipFree(ret_c); delete quats_f; delete ret_f; throw CudaException(); } for (int i = 0; i < quats.size(); ++i) { ret.push_back(Quaternion<float>(ret_f[i * 4], ret_f[i * 4 + 1], ret_f[i * 4 + 2], ret_f[i * 4 + 3])); } hipFree(quats_c); hipFree(ret_c); delete quats_f; delete ret_f; return ret; } #endif /** * Calculates the entropy on the GPU (this is not really necessary and does not lead to a significant speed up). * @param coords: The coordinates of the different water molecules. * @param x: The number of grid voxels in the x direction. * @param y: The number of grid voxels in the y direction. * @param z: The number of grid voxels in the z direction. 
* @param quats: A vector object holding all the quaternions. * @param temp: The temperature. * @param rho0: The reference density. * @param nFrames: The total number of frames. * @return: A vector holding the values for dTStrans, dTSorient and dTSsix. * @throws: A CudaException on error. */ std::vector<std::vector<float> > doActionCudaEntropy_GIGIST(std::vector<std::vector<Vec3> > coords, int x, int y, int z, std::vector<std::vector<Quaternion<float> > > quats, float temp, float rho0, int nFrames) { // For the CPU // Input (from previous calculations) std::vector<QuaternionG<float> > quatsF; std::vector<float> coordsF; std::vector<int> cumSumAtoms; // Results float *resultTStrans = new float[quats.size()]; float *resultTSorient = new float[quats.size()]; float *resultTSsix = new float[quats.size()]; // For the GPU // Input (from previous calculations) Dimensions dims = Dimensions(x, y, z); float *coordsG = NULL; QuaternionG<float> *quatsG = NULL; int *cumSumAtomsG = NULL; // Results float *resultTStransG = NULL; float *resultTSorientG = NULL; float *resultTSsixG = NULL; int sum = 0; for (int i = 0 ; i < quats.size(); ++i) { sum += quats.at(i).size(); cumSumAtoms.push_back(sum); for (int j = 0; j < quats.at(i).size(); ++j) { // quatsF always has the size of the number of the current molecule. 
coordsF.push_back((float) (coords.at(i).at(j)[0])); coordsF.push_back((float) (coords.at(i).at(j)[1])); coordsF.push_back((float) (coords.at(i).at(j)[2])); quatsF.push_back(quats.at(i).at(j)); } } hipError_t err1 = hipMalloc(&quatsG, quatsF.size() * sizeof(QuaternionG<float>)); hipError_t err2 = hipMalloc(&coordsG, coordsF.size() * sizeof(float)); hipError_t err3 = hipMalloc(&cumSumAtomsG, cumSumAtoms.size() * sizeof(int)); hipError_t err4 = hipMalloc(&resultTStransG, quats.size() * sizeof(float)); hipError_t err5 = hipMalloc(&resultTSorientG, quats.size() * sizeof(float)); hipError_t err6 = hipMalloc(&resultTSsixG, quats.size() * sizeof(float)); // Error Check if (err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess || err4 != hipSuccess || err5 != hipSuccess || err6 != hipSuccess) { hipFree(quatsG); hipFree(coordsG); hipFree(cumSumAtomsG); hipFree(resultTStransG); hipFree(resultTSorientG); hipFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } err1 = hipMemcpy(quatsG, &(quatsF[0]), quatsF.size() * sizeof(QuaternionG<float>), hipMemcpyHostToDevice); err2 = hipMemcpy(coordsG, &(coordsF[0]), coordsF.size() * sizeof(float), hipMemcpyHostToDevice); err3 = hipMemcpy(cumSumAtomsG, &(cumSumAtoms[0]), cumSumAtoms.size() * sizeof(int), hipMemcpyHostToDevice); // Error Check if (err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess) { hipFree(quatsG); hipFree(coordsG); hipFree(cumSumAtomsG); hipFree(resultTStransG); hipFree(resultTSorientG); hipFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } EntropyCalculator entCalc = EntropyCalculator(quatsG, coordsG, dims, cumSumAtomsG, temp, rho0, nFrames); hipLaunchKernelGGL(( calculateEntropy_GIGIST), dim3((quats.size() + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE), dim3(SLOW_BLOCKSIZE), 0, 0, entCalc, resultTStransG, resultTSorientG, resultTSsixG); hipError_t err7 = hipGetLastError(); // Error 
Check if (err7 != hipSuccess) { hipFree(quatsG); hipFree(coordsG); hipFree(cumSumAtomsG); hipFree(resultTStransG); hipFree(resultTSorientG); hipFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } // Copy back, use same errors as above for understandability. err4 = hipMemcpy(resultTStrans, resultTStransG, quats.size() * sizeof(float), hipMemcpyDeviceToHost); err5 = hipMemcpy(resultTSorient, resultTSorientG, quats.size() * sizeof(float), hipMemcpyDeviceToHost); err6 = hipMemcpy(resultTSsix, resultTSsixG, quats.size() * sizeof(float), hipMemcpyDeviceToHost); // Don't need that anymore. hipFree(quatsG); hipFree(coordsG); hipFree(cumSumAtomsG); hipFree(resultTStransG); hipFree(resultTSorientG); hipFree(resultTSsixG); // Error Check if (err4 != hipSuccess || err5 != hipSuccess || err6 != hipSuccess) { delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } std::vector<float> trans; std::vector<float> orient; std::vector<float> six; for (int i = 0; i < quats.size(); ++i) { trans.push_back(resultTStrans[i]); orient.push_back(resultTSorient[i]); six.push_back(resultTSsix[i]); } std::vector<std::vector<float> > ret; ret.push_back(trans); ret.push_back(orient); ret.push_back(six); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; return ret; }
3355fad310a36941410dd94c04ae071d3768cc5c.cu
#include "GistCudaSetup.cuh" #include "GistCudaCalc.cuh" #include "EntropyCalculator.cuh" #include <iostream> /** * Allocate memory on the GPU. * @param array: The pointer to the array, which will be allocated on the GPU. * @param size: An integer giving the size of the array, which will be allocated. * @throws: CudaException if a problem occurs. */ __host__ void allocateCuda_GIGIST(void **array, int size) { // Check if the array is actually free, if not, it will be freed // (fun fact: checking is not necessary, one could also simply free the memory). if ((*array) != NULL) { cudaFree(*array); } // If something goes wrong, throw exception if (cudaMalloc(array, size) != cudaSuccess) { throw CudaException(); } } /** * Copy memory from the CPU to the GPU. * @param array: The array from which the values shall be copied. * @param array_c: The array on the device, to which the values shall be copied. * @param size: The size of the stuff which will be copied. * @throws: CudaException if something goes wrong. */ __host__ void copyMemoryToDevice_GIGIST(void *array, void *array_c, int size) { // If something goes wrong, throw exception // In this case only copying can go wrong. if (cudaMemcpy(array_c, array, size, cudaMemcpyHostToDevice) != cudaSuccess) { throw CudaException(); } } /** * A simple helper function that copies a lot of stuff to the GPU (as structs). * @param charge: An array holding the charges for the different atoms. * @param atomtype: An array holding the integers for the atom types of the different atoms. * @param solvent: An array of boolean values, holding the information whether a certain atom is solvent or solute. * @param atomNumber: The total number of atoms. * @param atomProps_c: A pointer to an array on the GPU, which will hold the atom properties. * @param ljA: An array holding the lennard-jones parameter A for each atom type pair. * @param ljB: An array holding the lennard-jones parameter B for each atom type pair. 
* @param length: The length of the two aforementioned arrays (ljA & ljB). * @param lJparams_c: A pointer to an array on the GPU, which will hold the lj parameters. * @throws: CudaException if something bad happens. */ __host__ void copyMemoryToDeviceStruct_GIGIST(float *charge, int *atomtype, bool *solvent, int *molecule, int atomNumber, void **atomProps_c, float *ljA, float *ljB, int length, void **lJparams_c) { // Check if the two arrays are free. Again, this could be removed (but will stay!) if ((*atomProps_c) != NULL) { cudaFree(*atomProps_c); } if ((*lJparams_c) != NULL) { cudaFree(*lJparams_c); } // Allocate the necessary memory on the GPU. if (cudaMalloc(atomProps_c, atomNumber * sizeof(AtomProperties)) != cudaSuccess) { throw CudaException(); } if (cudaMalloc(lJparams_c, length * sizeof(ParamsLJ)) != cudaSuccess) { throw CudaException(); } // Create an array for the lennard-jones parameters. ParamsLJ *ljp = (ParamsLJ *) malloc (length * sizeof(ParamsLJ)); // Add the lennard-jones parameters to the array. for (int i = 0; i < length; ++i) { ljp[i] = ParamsLJ(ljA[i], ljB[i]); } // Create an array for the atom properties. AtomProperties *array = (AtomProperties *)malloc(atomNumber * sizeof(AtomProperties)); // Add the properties into the array. for (int i = 0; i < atomNumber; ++i) { array[i] = AtomProperties(charge[i], atomtype[i], solvent[i], molecule[i]); } // Copy the memory from the host to the device. if (cudaMemcpy((*atomProps_c), array, atomNumber * sizeof(AtomProperties), cudaMemcpyHostToDevice) != cudaSuccess) { throw CudaException(); } if (cudaMemcpy((*lJparams_c), ljp, length * sizeof(ParamsLJ), cudaMemcpyHostToDevice) != cudaSuccess) { throw CudaException(); } // Free the two arrays (so that no memory leak occurs). free(ljp); free(array); } /** * Free an array. * @param array: The array you want to free. */ __host__ void freeCuda_GIGIST(void *array) { cudaFree(array); } // This is coded C-like, but uses exceptions. 
/** * This starts the cuda kernel, thus it is actually a quite long function. */ __host__ EnergyReturn doActionCudaEnergy_GIGIST(const double *coords, int *NBindex_c, int ntypes, void *parameter, void *molecule_c, int boxinfo, float *recip_o_box, float *ucell, int maxAtoms, int headAtomType, float neighbourCut2, int *result_o, int *result_n, float *result_w_c, float *result_s_c, int *result_O_c, int *result_N_c, bool doorder) { Coordinates_GPU *coords_c = NULL; float *recip_b_c = NULL; float *ucell_c = NULL; float *result_A = (float *) calloc(maxAtoms, sizeof(float)); float *result_s = (float *) calloc(maxAtoms, sizeof(float)); Coordinates_GPU *coord_array = (Coordinates_GPU *) calloc(maxAtoms, sizeof(Coordinates_GPU)); // Casting AtomProperties *sender = (AtomProperties *) molecule_c; ParamsLJ *lennardJonesParams = (ParamsLJ *) parameter; // Create Boxinfo and Unit cell. This is actually very important for the speed (otherwise // there would be LOTS of access to non-local variables). BoxInfo boxinf; if (boxinfo != 0) { boxinf = BoxInfo(recip_o_box, boxinfo); } UnitCell ucellN; if (boxinfo == 2) { ucellN = UnitCell(ucell); } // Add the coordinates to the array. for (int i = 0; i < maxAtoms; ++i) { coord_array[i] = Coordinates_GPU(&coords[i * 3]); } // vectors that will return the necessary information. 
std::vector<float> result_esw; std::vector<float> result_eww; // Allocate space on the GPU if (cudaMalloc(&coords_c, maxAtoms * sizeof(Coordinates_GPU)) != cudaSuccess) { free(result_A); free(result_s); free(coord_array); throw CudaException(); } // Copy the data to the GPU if (cudaMemcpy(coords_c, coord_array, maxAtoms * sizeof(Coordinates_GPU), cudaMemcpyHostToDevice) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (cudaMemcpy(result_w_c, result_A, maxAtoms * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (cudaMemcpy(result_s_c, result_s, maxAtoms * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } // If the doorder calculation is used, it needs to calculate everything differently, so the slow version is used // (this is about 10% slower). if (doorder) { cudaCalcEnergySlow_GIGIST<<< (maxAtoms + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE, SLOW_BLOCKSIZE >>> (coords_c, NBindex_c, ntypes, lennardJonesParams, sender, boxinf, ucellN, maxAtoms, result_w_c, result_s_c, nullptr, nullptr, headAtomType, neighbourCut2, result_O_c, result_N_c); } else { // Uses a 2D array, which is nice for memory access. dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE); dim3 numBlocks((maxAtoms + threadsPerBlock.x) / threadsPerBlock.x, (maxAtoms + threadsPerBlock.y) / threadsPerBlock.y); // The actual call of the device function cudaCalcEnergy_GIGIST<<<numBlocks, threadsPerBlock>>> (coords_c, NBindex_c, ntypes, lennardJonesParams, sender, boxinf, ucellN, maxAtoms, result_w_c, result_s_c, nullptr, nullptr, headAtomType, neighbourCut2, result_O_c, result_N_c); // Check if there was an error. 
cudaError_t cudaError = cudaGetLastError(); if (cudaError != cudaSuccess) { printf("returned %s\n", cudaGetErrorString(cudaError)); } } // Return the results of the calculation to the main memory if (cudaMemcpy(result_A, result_w_c, maxAtoms * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (cudaMemcpy(result_s, result_s_c, maxAtoms * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (cudaMemcpy(result_o, result_O_c, maxAtoms * 4 * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } if (cudaMemcpy(result_n, result_N_c, maxAtoms * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); throw CudaException(); } for (int i = 0; i < maxAtoms; ++i) { result_eww.push_back(result_A[i]); result_esw.push_back(result_s[i]); } // Free everything used in here. 
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c); free(result_A); free(result_s); free(coord_array); return {result_eww, result_esw}; } #ifdef DEBUG_GIST_CUDA // Not necessary __host__ std::vector<Quaternion<float> > shoveQuaternionsTest_GIGIST(std::vector<Quaternion<float> > quats) { QuaternionG<float> *quats_c = NULL; float *ret_c = NULL; std::vector<Quaternion<float> > ret; float *ret_f = new float[quats.size() * 4]; QuaternionG<float> *quats_f = new QuaternionG<float>[quats.size()]; for (int i = 0; i < quats.size(); ++i) { quats_f[i] = quats.at(i); } if (cudaMalloc(&quats_c, quats.size() * sizeof(QuaternionG<float>)) != cudaSuccess) { delete quats_f; delete ret_f; throw CudaException(); } if (cudaMalloc(&ret_c, quats.size() * 4 * sizeof(float)) != cudaSuccess) { cudaFree(quats_c); delete quats_f; delete ret_f; throw CudaException(); } if (cudaMemcpy(quats_c, quats_f, quats.size() * sizeof(QuaternionG<float>), cudaMemcpyHostToDevice) != cudaSuccess) { cudaFree(quats_c); cudaFree(ret_c); delete quats_f; delete ret_f; throw CudaException(); } shoveQuaternions_GIGIST<<< (quats.size() + BLOCKSIZE) / BLOCKSIZE, BLOCKSIZE >>> (quats_c, quats.size(), ret_c); if (cudaMemcpy(ret_f, ret_c, quats.size() * 4 * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(quats_c); cudaFree(ret_c); delete quats_f; delete ret_f; throw CudaException(); } for (int i = 0; i < quats.size(); ++i) { ret.push_back(Quaternion<float>(ret_f[i * 4], ret_f[i * 4 + 1], ret_f[i * 4 + 2], ret_f[i * 4 + 3])); } cudaFree(quats_c); cudaFree(ret_c); delete quats_f; delete ret_f; return ret; } #endif /** * Calculates the entropy on the GPU (this is not really necessary and does not lead to a significant speed up). * @param coords: The coordinates of the different water molecules. * @param x: The number of grid voxels in the x direction. * @param y: The number of grid voxels in the y direction. * @param z: The number of grid voxels in the z direction. 
* @param quats: A vector object holding all the quaternions. * @param temp: The temperature. * @param rho0: The reference density. * @param nFrames: The total number of frames. * @return: A vector holding the values for dTStrans, dTSorient and dTSsix. * @throws: A CudaException on error. */ std::vector<std::vector<float> > doActionCudaEntropy_GIGIST(std::vector<std::vector<Vec3> > coords, int x, int y, int z, std::vector<std::vector<Quaternion<float> > > quats, float temp, float rho0, int nFrames) { // For the CPU // Input (from previous calculations) std::vector<QuaternionG<float> > quatsF; std::vector<float> coordsF; std::vector<int> cumSumAtoms; // Results float *resultTStrans = new float[quats.size()]; float *resultTSorient = new float[quats.size()]; float *resultTSsix = new float[quats.size()]; // For the GPU // Input (from previous calculations) Dimensions dims = Dimensions(x, y, z); float *coordsG = NULL; QuaternionG<float> *quatsG = NULL; int *cumSumAtomsG = NULL; // Results float *resultTStransG = NULL; float *resultTSorientG = NULL; float *resultTSsixG = NULL; int sum = 0; for (int i = 0 ; i < quats.size(); ++i) { sum += quats.at(i).size(); cumSumAtoms.push_back(sum); for (int j = 0; j < quats.at(i).size(); ++j) { // quatsF always has the size of the number of the current molecule. 
coordsF.push_back((float) (coords.at(i).at(j)[0])); coordsF.push_back((float) (coords.at(i).at(j)[1])); coordsF.push_back((float) (coords.at(i).at(j)[2])); quatsF.push_back(quats.at(i).at(j)); } } cudaError_t err1 = cudaMalloc(&quatsG, quatsF.size() * sizeof(QuaternionG<float>)); cudaError_t err2 = cudaMalloc(&coordsG, coordsF.size() * sizeof(float)); cudaError_t err3 = cudaMalloc(&cumSumAtomsG, cumSumAtoms.size() * sizeof(int)); cudaError_t err4 = cudaMalloc(&resultTStransG, quats.size() * sizeof(float)); cudaError_t err5 = cudaMalloc(&resultTSorientG, quats.size() * sizeof(float)); cudaError_t err6 = cudaMalloc(&resultTSsixG, quats.size() * sizeof(float)); // Error Check if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess || err4 != cudaSuccess || err5 != cudaSuccess || err6 != cudaSuccess) { cudaFree(quatsG); cudaFree(coordsG); cudaFree(cumSumAtomsG); cudaFree(resultTStransG); cudaFree(resultTSorientG); cudaFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } err1 = cudaMemcpy(quatsG, &(quatsF[0]), quatsF.size() * sizeof(QuaternionG<float>), cudaMemcpyHostToDevice); err2 = cudaMemcpy(coordsG, &(coordsF[0]), coordsF.size() * sizeof(float), cudaMemcpyHostToDevice); err3 = cudaMemcpy(cumSumAtomsG, &(cumSumAtoms[0]), cumSumAtoms.size() * sizeof(int), cudaMemcpyHostToDevice); // Error Check if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess) { cudaFree(quatsG); cudaFree(coordsG); cudaFree(cumSumAtomsG); cudaFree(resultTStransG); cudaFree(resultTSorientG); cudaFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } EntropyCalculator entCalc = EntropyCalculator(quatsG, coordsG, dims, cumSumAtomsG, temp, rho0, nFrames); calculateEntropy_GIGIST<<<(quats.size() + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE, SLOW_BLOCKSIZE>>>(entCalc, resultTStransG, resultTSorientG, resultTSsixG); cudaError_t err7 = cudaGetLastError(); // 
Error Check if (err7 != cudaSuccess) { cudaFree(quatsG); cudaFree(coordsG); cudaFree(cumSumAtomsG); cudaFree(resultTStransG); cudaFree(resultTSorientG); cudaFree(resultTSsixG); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } // Copy back, use same errors as above for understandability. err4 = cudaMemcpy(resultTStrans, resultTStransG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost); err5 = cudaMemcpy(resultTSorient, resultTSorientG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost); err6 = cudaMemcpy(resultTSsix, resultTSsixG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost); // Don't need that anymore. cudaFree(quatsG); cudaFree(coordsG); cudaFree(cumSumAtomsG); cudaFree(resultTStransG); cudaFree(resultTSorientG); cudaFree(resultTSsixG); // Error Check if (err4 != cudaSuccess || err5 != cudaSuccess || err6 != cudaSuccess) { delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; throw CudaException(); } std::vector<float> trans; std::vector<float> orient; std::vector<float> six; for (int i = 0; i < quats.size(); ++i) { trans.push_back(resultTStrans[i]); orient.push_back(resultTSorient[i]); six.push_back(resultTSsix[i]); } std::vector<std::vector<float> > ret; ret.push_back(trans); ret.push_back(orient); ret.push_back(six); delete[] resultTStrans; delete[] resultTSorient; delete[] resultTSsix; return ret; }
6a495465f056b4ec7355fb0f3385c07818d4f3ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/tracking_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1); } } template <typename Dtype> __global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0); } } template <typename Dtype> __global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? in[index] : Dtype(0); } } template <typename Dtype> __global__ void KillMasked(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0); // out[index] = out[index]==out[index] ? out[index] : Dtype(0); // out[index] = out[index]>1e3 ? 0 : out[index]; // out[index] = out[index]<-1e3 ? 0 : out[index]; } } template <typename Dtype> __global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int mask_idx = index % width_height; out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0); } } template <typename Dtype> __global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is } } template <typename Dtype> __global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { out[index] = (fabs(in[index]) < plateau) ? 
Dtype(0) : Dtype(1); } } template <typename Dtype> void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *diffptr = diff_top_vec_[0]; Dtype dot, loss; if(bottom.size() > 1) { diff_layer_->Forward(bottom, diff_top_vec_); } // if necessary, compute the number of not-NaNs int count = bottom[0]->count(); int num = bottom[0]->num(); hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), mask_.mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) { caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_); normalize_coeff_ /= mask_.channels(); } else { normalize_coeff_ = num; } if (this->layer_param_.l1_loss_param().l2_per_location()) { // set masked (NaNs only) to zero hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mask_.gpu_data(), diffptr->mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; square_layer_->Forward(diff_top_vec_, square_top_vec_); sum_layer_->Forward(square_top_vec_, sum_top_vec_); // Mask plateau in summed blob (only one channel): if(this->layer_param_.l1_loss_param().plateau() > 0) { float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau(); hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } 
sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_); // Note sign_ is set to all ones in Reshape caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot); } else { // Mask plateau: if(this->layer_param_.l1_loss_param().plateau() > 0) { hipLaunchKernelGGL(( MaskPlateauValues<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau()); CUDA_POST_KERNEL_CHECK; } //mask_.print("MASK2"); // set masked (NaNs, plateau) to zero hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mask_.gpu_data(), diffptr->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( ComputeSign<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), sign_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot); } loss = dot / normalize_coeff_; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { bool prop_down = propagate_down[0]; if(bottom.size() > 1) prop_down |= propagate_down[1]; Blob<Dtype> *diffptr = diff_top_vec_[0]; if (prop_down) { const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_; if (this->layer_param_.l1_loss_param().l2_per_location()) { vector<bool> prop_down(1,true); caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(), Dtype(0), sqrt_output_.mutable_gpu_diff()); sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_); if(this->layer_param_.l1_loss_param().plateau() > 0) { hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff()); 
hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_); square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_); } else { caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(), Dtype(0), diffptr->mutable_gpu_diff()); } hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if(bottom.size() > 1) { diff_layer_->Backward(diff_top_vec_, propagate_down, bottom); } } } INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer); } // namespace caffe
6a495465f056b4ec7355fb0f3385c07818d4f3ee.cu
#include <vector> #include "caffe/tracking_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1); } } template <typename Dtype> __global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0); } } template <typename Dtype> __global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? in[index] : Dtype(0); } } template <typename Dtype> __global__ void KillMasked(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0); // out[index] = out[index]==out[index] ? out[index] : Dtype(0); // out[index] = out[index]>1e3 ? 0 : out[index]; // out[index] = out[index]<-1e3 ? 0 : out[index]; } } template <typename Dtype> __global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int mask_idx = index % width_height; out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0); } } template <typename Dtype> __global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is } } template <typename Dtype> __global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { out[index] = (fabs(in[index]) < plateau) ? 
Dtype(0) : Dtype(1); } } template <typename Dtype> void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *diffptr = diff_top_vec_[0]; Dtype dot, loss; if(bottom.size() > 1) { diff_layer_->Forward(bottom, diff_top_vec_); } // if necessary, compute the number of not-NaNs int count = bottom[0]->count(); int num = bottom[0]->num(); FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), mask_.mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) { caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_); normalize_coeff_ /= mask_.channels(); } else { normalize_coeff_ = num; } if (this->layer_param_.l1_loss_param().l2_per_location()) { // set masked (NaNs only) to zero KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mask_.gpu_data(), diffptr->mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; square_layer_->Forward(diff_top_vec_, square_top_vec_); sum_layer_->Forward(square_top_vec_, sum_top_vec_); // Mask plateau in summed blob (only one channel): if(this->layer_param_.l1_loss_param().plateau() > 0) { float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau(); MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_); // Note sign_ is set to all ones in Reshape caffe_gpu_dot(sqrt_output_.count(), 
sqrt_output_.gpu_data(), sign_.gpu_data(), &dot); } else { // Mask plateau: if(this->layer_param_.l1_loss_param().plateau() > 0) { MaskPlateauValues<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau()); CUDA_POST_KERNEL_CHECK; } //mask_.print("MASK2"); // set masked (NaNs, plateau) to zero KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mask_.gpu_data(), diffptr->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; ComputeSign<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), sign_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot); } loss = dot / normalize_coeff_; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { bool prop_down = propagate_down[0]; if(bottom.size() > 1) prop_down |= propagate_down[1]; Blob<Dtype> *diffptr = diff_top_vec_[0]; if (prop_down) { const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_; if (this->layer_param_.l1_loss_param().l2_per_location()) { vector<bool> prop_down(1,true); caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(), Dtype(0), sqrt_output_.mutable_gpu_diff()); sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_); if(this->layer_param_.l1_loss_param().plateau() > 0) { KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_); square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_); } else { caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(), Dtype(0), diffptr->mutable_gpu_diff()); } 
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>( diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if(bottom.size() > 1) { diff_layer_->Backward(diff_top_vec_, propagate_down, bottom); } } } INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer); } // namespace caffe
69b3223160e334ac23dfeaf90a5051cd0648ed11.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "mt_cpu.h" #include "Timer.h" namespace boom { #define N_d 12 #define viscPF (4.3e-7f*2f) //(1.1e-6 ) // (kcal/mol)*(s/nm^2) #define viscPF_teta (2.3e-6f*2f) //(1.7e-5 ) // (kcal/mol)*s #define B_Koeff 174.0f //kcal/mol #define B_Koeff 91.25 //kcal/mol #define dt (2e-10f) // s #define dt_viscPF_teta 4.34782596e-5f // dt_viscPF = dt/viscPF; #define dt_viscPF 0.000232558144f // dt_viscPF_teta = dt/viscPF_teta; #define R_MT 8.128f #define A_Koeff 53.0f #define b_lat 12.9f #define A_long_D 90.0f #define b_long_D 7.9f #define A_long_T 90.0f #define b_long_T 7.9f #define ro0 0.12f #define ro0_long 0.12f #define inv_ro0_long 8.3333f // 1/ro0_long #define c_lat 0.10f #define d_lat 0.25f #define C_Koeff 300.0f #define Rad 2.0f #define inv_ro0 8.3333f // = 1 / ro0; #define clat_dlat_ro0 6.6666f // //clat_dlat_ro0 = 2*c_lat / (d_lat* ro0); #define clong_dlong_ro0 6.6666f //clong_dlong_ro0 = 2*c_lat / (d_lat* ro0); #define d_lat_ro0 33.3333f // d_lat_ro0 = 1 / (d_lat*ro0); #define d_long_ro0 33.3333f // d_long_ro0 = 1 / (d_lat*ro0_long); #define pi 3.141592653f #define fi_r 1.3291395f #define psi_r 1.801785f #define fi_l 1.0856f #define psi_l -1.339725f #define rad_mon 2.0f #define teta0_D 0.2f //rad #define teta0_T 0.0f #define bit char __constant__ float Ax_1[13] = {-0.165214628f, 0.0561592989f, 0.264667839f, 0.412544012f, 0.465911359f, 0.412544012f, 0.264667839f, 0.0561594106f, -0.165214419f, -0.348739684f, -0.452372819f, -0.452372819f, -0.348739684f}; __constant__ float Ax_2[13] = {1.76747036f, 1.87652779f, 1.5556947f, 0.878470898f, 0.0f, -0.878470898f, -1.5556947f, -1.87652767f, -1.76747072f, -1.25350749f, -0.452380866f, 0.452380747f, 1.25350738f}; __constant__ float Ax_3[13] = {0.162366703f, -0.0551912338f, -0.26010555f, -0.405432671f, -0.45788008f, -0.405432671f, -0.26010555f, -0.0551913455f, 0.162366495f, 0.342728168f, 0.444574922f, 0.444574922f, 
0.342728198f}; __constant__ float A_Bx_4[13] = {0.0f, 0.46472317f, 0.822983861f, 0.992708862f, 0.935016215f, 0.663122654f, 0.239315659f, -0.239315659f, -0.663122654f, -0.935016215f, -0.992708862f, -0.822983861f, -0.46472317f}; __constant__ float Ay_1[13] = {0.435634613f, 0.462514341f, 0.383437514f, 0.216519803f, 0.0f, -0.216519803f, -0.383437514f, -0.462514341f, -0.435634702f, -0.308956355f, -0.111499891f, 0.111499861f, 0.308956355f}; __constant__ float Ay_2[13] = {-0.428125232f, -0.454541624f, -0.376827925f, -0.212787479f, -0.0f, 0.212787479f, 0.376827925f, 0.454541624f, 0.428125322f, 0.30363065f, 0.109577879f, -0.10957785f, -0.30363062f}; __constant__ float Ay_3[13] = {-0.670314014f, 0.227851257f, 1.07381856f, 1.67378652f, 1.89031017f, 1.67378652f, 1.07381856f, 0.227851719f, -0.67031312f, -1.41491747f, -1.83538115f, -1.83538127f, -1.41491759f}; __constant__ float A_By_4[13] = {1.0f, 0.885456026f, 0.568064749f, 0.120536678f, -0.3546049f, -0.748510778f, -0.970941842f, -0.970941842f, -0.748510778f, -0.3546049f, 0.120536678f, 0.568064749f, 0.885456026f}; __constant__ float Az_1 = 0.465911359f; __constant__ float Az_2 = -0.45788008f; __constant__ float Bx_1[13] = {0.321971923f, -0.109443799f, -0.515787303f, -0.80396992f, -0.907972693f, -0.80396992f, -0.515787303f, -0.109444022f, 0.321971506f, 0.679627359f, 0.881588638f, 0.881588697f, 0.679627359f}; __constant__ float Bx_2[13] = {-1.61023343f, -1.70958889f, -1.41729772f, -0.800320745f, -0.0f, 0.800320745f, 1.41729772f, 1.70958877f, 1.61023378f, 1.14199352f, 0.412136346f, -0.412136227f, -1.1419934f}; __constant__ float Bx_3[13] = {-0.16242376f, 0.0552106313f, 0.260196954f, 0.405575156f, 0.458040982f, 0.405575156f, 0.260196954f, 0.0552107431f, -0.162423551f, -0.342848599f, -0.444731146f, -0.444731146f, -0.342848629f}; __constant__ float By_1[13] = {-0.848969102f, -0.901352584f, -0.747246861f, -0.421955943f, -0.0f, 0.421955943f, 0.747246861f, 0.901352525f, 0.848969221f, 0.602097273f, 0.2172921f, -0.217292041f, 
-0.602097213f}; __constant__ float By_2[13] = {0.428275675f, 0.454701364f, 0.376960337f, 0.212862253f, 0.0f, -0.212862253f, -0.376960337f, -0.454701334f, -0.428275764f, -0.303737342f, -0.109616384f, 0.109616362f, 0.303737313f}; __constant__ float By_3[13] = {0.610681832f, -0.207581252f, -0.978290021f, -1.52488387f, -1.7221452f, -1.52488387f, -0.978290021f, -0.207581669f, 0.610681057f, 1.28904426f, 1.67210281f, 1.67210281f, 1.28904426f}; __constant__ float Bz_1 = -0.907972693f; __constant__ float Bz_2 = 0.458040982f; __device__ __forceinline__ void calc_grad_c( int i1, // i index int j1, // j index int i2, // i index bit type, // dimer type: 0 - 'D', 1 - 'T' bit pos, // monomer position in dimer: 0 - bottom, 1 - top float x_1, // mol1 float y_1, float teta_1, float x_2, // mol2 float y_2, float teta_2, float x_3, // mol3 float y_3, float teta_3, float *grad_lat_x_1, // left component of mol1 float *grad_lat_y_1, float *grad_lat_teta_1, float *grad_lat_x_2, // right component of mol2 float *grad_lat_y_2, float *grad_lat_teta_2, float *grad_long_x_1, // up component of mol1 float *grad_long_y_1, float *grad_long_teta_1, float *grad_long_x_3, // down component of mol3 float *grad_long_y_3, float *grad_long_teta_3 ) { // PE_left - i2, PF_right - i1 float cos_t_A = cosf(teta_2); float sin_t_A = sinf(teta_2); float cos_t_B = cosf(teta_1); float sin_t_B = sinf(teta_1); float cos_t_1 = cos_t_B; float sin_t_1 = sin_t_B; float cos_t_3 = cosf(teta_3); float sin_t_3 = sinf(teta_3); // swap i1 <=> i2 float Ax_left = Ax_1[i2]*cos_t_A + Ax_3[i2]*sin_t_A - Ax_2[i2] + (x_2 + R_MT) * A_Bx_4[i2]; float Ay_left = Ay_1[i2]*cos_t_A + Ay_2[i2]*sin_t_A + Ay_3[i2] + (x_2 + R_MT) * A_By_4[i2]; float Az_left = -Az_1*sin_t_A + Az_2*cos_t_A + y_2; float Bx_right = Bx_1[i1]*cos_t_B + Bx_3[i1]*sin_t_B - Bx_2[i1] + (x_1 + R_MT) * A_Bx_4[i1]; float By_right = By_1[i1]*cos_t_B + By_2[i1]*sin_t_B + By_3[i1] + (x_1 + R_MT) * A_By_4[i1]; float Bz_right = -Bz_1*sin_t_B + Bz_2*cos_t_B + y_1; float Dx = 
Ax_left - Bx_right; float Dy = Ay_left - By_right; float Dz = Az_left - Bz_right; float dist = sqrtf(( pow(Dx, 2) + pow(Dy, 2) + pow(Dz, 2) )); if (dist <=1e-7 ){ dist = 1e-5; } float inv_dist = 1/dist; float drdAx = Dx * inv_dist; float drdAy = Dy * inv_dist; float drdAz = Dz * inv_dist; float drdBx = -drdAx; float drdBy = -drdAy; float drdBz = -drdAz; float dA_X_dteta = -sin_t_A*Ax_1[i2] + cos_t_A*Ax_3[i2]; float dA_Y_dteta = -sin_t_A*Ay_1[i2] + cos_t_A*Ay_2[i2]; float dA_Z_dteta = -cos_t_A*Az_1 - sin_t_A*Az_2; float drdx_A = drdAx*A_Bx_4[i2] + drdAy*A_By_4[i2]; float drdy_A = drdAz; float drdteta_A = drdAx*dA_X_dteta + drdAy*dA_Y_dteta + drdAz*dA_Z_dteta; //================================================ float dB_X_dteta = -sin_t_B*Bx_1[i1] + cos_t_B*Bx_3[i1]; float dB_Y_dteta = -sin_t_B*By_1[i1] + cos_t_B*By_2[i1]; float dB_Z_dteta = -cos_t_B*Bz_1 - sin_t_B*Bz_2; float drdx_B = drdBx*A_Bx_4[i1] + drdBy*A_By_4[i1]; float drdy_B = drdBz; float drdteta_B = drdBx*dB_X_dteta + drdBy*dB_Y_dteta + drdBz*dB_Z_dteta; float Grad_U_tmp = (b_lat* dist *expf(-dist*inv_ro0)*(2.0f - dist*inv_ro0) + dist* clat_dlat_ro0 * expf( - (dist*dist) * d_lat_ro0 ) ) * A_Koeff; if ((i1==12)&&(j1>=(N_d-3))) { *grad_lat_x_2 = 0.0f; *grad_lat_y_2 = 0.0f; *grad_lat_teta_2 = 0.0f; *grad_lat_x_1 = 0.0f; *grad_lat_y_1 = 0.0f; *grad_lat_teta_1 = 0.0f; } else { *grad_lat_x_2 = Grad_U_tmp * drdx_A; *grad_lat_y_2 = Grad_U_tmp * drdy_A; *grad_lat_teta_2 = Grad_U_tmp * drdteta_A; *grad_lat_x_1 = Grad_U_tmp * drdx_B; *grad_lat_y_1 = Grad_U_tmp * drdy_B; *grad_lat_teta_1 = Grad_U_tmp * drdteta_B; } // [nd] - mol3 // [nd-1] - mol1 // longitudinal gradient float r_long_x = (x_3 - x_1) - Rad*(sin_t_1 + sin_t_3); float r_long_y = (y_3 - y_1) - Rad*(cos_t_1 + cos_t_3); float r_long = sqrtf( r_long_x*r_long_x + r_long_y*r_long_y); if (r_long <=1e-15 ){ r_long = 1e-7; } float drdx_long = - r_long_x/r_long; float drdy_long = - r_long_y/r_long; float dUdr_C; if (pos==0) { // bottom monomer (interaction inside 
dimer) dUdr_C = C_Koeff*r_long; } else { // top monomer (interaction with upper dimer) float tmp1 = r_long * expf(-r_long*inv_ro0_long)*(2 - r_long*inv_ro0_long); float tmp2 = r_long * clong_dlong_ro0 * expf(-(r_long*r_long) * d_long_ro0 ); if (type==0) // dimer type 'D' dUdr_C = (tmp1*b_long_D + tmp2) * A_long_D; else // dimer type 'T' dUdr_C = (tmp1*b_long_T + tmp2) * A_long_T; } float Grad_tmp_x = drdx_long * dUdr_C; float Grad_tmp_y = drdy_long * dUdr_C; float GradU_C_teta_1 = -dUdr_C*( drdx_long*(-Rad*cos_t_1) + drdy_long*(Rad*sin_t_1)); float GradU_C_teta_3 = dUdr_C*(-drdx_long*(-Rad*cos_t_3) - drdy_long*(Rad*sin_t_3)); float Grad_tmp; if (type==0) // dimer type 'D' Grad_tmp = B_Koeff*(teta_3 - teta_1 - teta0_D); else // dimer type 'T' Grad_tmp = B_Koeff*(teta_3 - teta_1 - teta0_T); // - ! float GradU_B_teta_1 = - Grad_tmp; float GradU_B_teta_3 = + Grad_tmp; if (j1 == (N_d-1)) { *grad_long_x_1 = 0.0f; *grad_long_y_1 = 0.0f; *grad_long_teta_1 = 0.0f; *grad_long_x_3 = 0.0f; *grad_long_y_3 = 0.0f; *grad_long_teta_3 = 0.0f; } else { *grad_long_x_1 = Grad_tmp_x; *grad_long_y_1 = Grad_tmp_y; *grad_long_teta_1 = GradU_C_teta_1 + GradU_B_teta_1; *grad_long_x_3 = - Grad_tmp_x; *grad_long_y_3 = - Grad_tmp_y; *grad_long_teta_3 = GradU_C_teta_3 + GradU_B_teta_3; } } __global__ void mt_cuda_kernel1( const int niters, float* x_inout, float* y_inout, float* t_inout ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int s; if(i==0 && j==0){ printf("niters %d\n", niters); } __shared__ float __x[13][N_d+3]; __shared__ float __y[13][N_d+3]; __shared__ float __t[13][N_d+3]; __shared__ float4 __lat_r[13][N_d+3]; __shared__ float4 __long_d[13][N_d+1]; //__lat_r[i][j]=0; //__long_d[i][j]=0; __x[i][j]=x_inout[i*(N_d+3) + j]; __y[i][j]=y_inout[i*(N_d+3) + j]; __t[i][j]=t_inout[i*(N_d+3) + j]; float f_x, f_y, f_t; bit pos = 0; bit type = 0; float x_ij, y_ij, t_ij, x_i2j2,y_i2j2,t_i2j2,x_ij_1, y_ij_1, t_ij_1; float lat_l_x, lat_l_y, 
lat_l_t, lat_r_x, lat_r_y, lat_r_t, long_u_x, long_u_y, long_u_t, long_d_x, long_d_y, long_d_t; int i2 = (i==12)? 0 : (i+1); int j2 = (i==12)? (j+3) : j; pos =(j % 2); __syncthreads(); //#pragma unroll 2 for (s=0;s<niters;s++){ x_ij= __x[i][j]; y_ij=__y[i][j]; t_ij=__t[i][j]; x_i2j2=__x[i2][j2]; y_i2j2=__y[i2][j2]; t_i2j2=__t[i2][j2]; x_ij_1=__x[i][j+1]; y_ij_1=__y[i][j+1], t_ij_1=__t[i][j+1]; calc_grad_c(i, j, i2, type, pos, x_ij, y_ij, t_ij, x_i2j2,y_i2j2,t_i2j2, x_ij_1, y_ij_1, t_ij_1, &lat_l_x,&lat_l_y,&lat_l_t, &lat_r_x,&lat_r_y,&lat_r_t, &long_u_x, &long_u_y, &long_u_t, &long_d_x, &long_d_y, &long_d_t); __lat_r[i2][j2].x=lat_r_x; __lat_r[i2][j2].y=lat_r_y; __lat_r[i2][j2].z=lat_r_t; __long_d[i][j+1].x=long_d_x;__long_d[i][j+1].y=long_d_y;__long_d[i][j+1].z=long_d_t; __syncthreads(); if (j!=0){ f_x = lat_l_x + __lat_r[i][j].x + long_u_x + __long_d[i][j].x; f_y = lat_l_y + __lat_r[i][j].y + long_u_y + __long_d[i][j].y; f_t = lat_l_t + __lat_r[i][j].z + long_u_t + __long_d[i][j].z; x_ij -= dt_viscPF * f_x; y_ij -= dt_viscPF * f_y; t_ij -= dt_viscPF_teta * f_t; __x[i][j]=x_ij; __y[i][j]=y_ij; __t[i][j]=t_ij; } // if j __syncthreads(); } // for s if (j!=0){ x_inout[i*(N_d+3) + j] = __x[i][j]; y_inout[i*(N_d+3) + j] = __y[i][j]; t_inout[i*(N_d+3) + j] = __t[i][j]; } } /* __global__ void mt_cuda_kernel_root( const int niters, float* x_inout, float* y_inout, float* t_inout ) { dim3 block_size; block_size.x=13; block_size.y=N_d; dim3 grid_size; grid_size.x = 1; grid_size.y = 1; hipLaunchKernelGGL(( boom::mt_cuda_kernel1), dim3(grid_size), dim3(block_size), 0, 0, niters, x_inout, y_inout, t_inout); } */ } // ns boom void init_coords(float x[][N_d], float y[][N_d], float t[][N_d]); void rnd_coords(float x[][N_d], float y[][N_d], float t[][N_d]); void cpy_coords2(float x[][N_d], float y[][N_d], float t[][N_d],float x2[][N_d+3], float y2[][N_d+3], float t2[][N_d+3]); bool compare2(const float refData[][N_d], const float data[][N_d+3],float* err, float* delta, const float 
epsilon); float x_1[13][N_d]; float y_1[13][N_d]; float t_1[13][N_d]; float x_3[13][N_d+3]; float y_3[13][N_d+3]; float t_3[13][N_d+3]; int main(void) { hipError_t err = hipSuccess; CPerfCounter t_cu; int use_cmp=0; float err1=0.0f; float delta1=0.0f; float epsilon = 1e-2f; int niters=100000; size_t size = 13*(N_d+3)*sizeof(float); /* for (int i = 0; i < 13; ++i) for (int j = 0; j < N_d; ++j) { x_3[i][j] = 7;//rand()/(float)RAND_MAX; y_3[i][j] = 9;//rand()/(float)RAND_MAX; t_3[i][j] = 13;//rand()/(float)RAND_MAX; } */ init_coords(x_1,y_1,t_1); rnd_coords(x_1,y_1,t_1); cpy_coords2(x_1,y_1,t_1,x_3,y_3,t_3); // Allocate the device vector float *d_x = NULL; err = hipMalloc((void **)&d_x, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector x (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device vector float *d_y = NULL; err = hipMalloc((void **)&d_y, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector y (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device vector float *d_t = NULL; err = hipMalloc((void **)&d_t, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector t (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } t_cu.Reset();t_cu.Start(); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_x, x_3, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector x from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_y, y_3, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector y from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_t, t_3, size, hipMemcpyHostToDevice); if (err != 
hipSuccess) { fprintf(stderr, "Failed to copy vector t from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Kernel dim3 block_size; block_size.x=13; block_size.y=N_d; dim3 grid_size; grid_size.x = 1;//num_elements_x / block_size.x; grid_size.y = 1;//num_elements_y / block_size.y; printf("CUDA kernel launch with %d,%d %d,%d\n", block_size.x, block_size.y, grid_size.x,grid_size.y); hipLaunchKernelGGL(( boom::mt_cuda_kernel1), dim3(grid_size), dim3(block_size), 0, 0, niters, d_x, d_y, d_t); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. // printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(x_3, d_x, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector x from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(y_3, d_y, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector y from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(t_3, d_t, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector t from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } t_cu.Stop(); printf("niters %d t_cu %lf\n",niters,t_cu.GetElapsedTime()); // printf("%f,%f,%f\n",x_3[0][1],y_3[0][1],t_3[0][1]); if(use_cmp){ mt_cpu(niters,1,x_1,y_1,t_1,x_1,y_1,t_1); if(!compare2(x_1, x_3, &err1, &delta1, epsilon) || !compare2(y_1, y_3, &err1, &delta1, epsilon) || !compare2(t_1, t_3, &err1, &delta1, epsilon)) { printf("Compare cu results failed (%f,%f)\n",err1,delta1); } else printf("Test OK(%f,%f)\n",err1,delta1); } // Free device global memory err = hipFree(d_x); if (err != 
hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_y); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_t); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory //free(h_A); //free(h_B); //free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; } void init_coords(float x[][N_d], float y[][N_d], float t[][N_d]) { int i,j; // y for (i=0; i<13; i++) y[i][0] = 2.0f*6/13*(i+1); // y for (j=1; j<N_d-4; j++) for (i=0; i<13; i++) y[i][j] = y[i][j-1] + 2.0f*Rad; // x teta for (j=0; j<N_d-5; j++) for (i=0; i<13; i++) { x[i][j] = 0.0; t[i][j] = 0.0; } // for (i=0; i<13; i++) { x[i][N_d-5] = 0.6; t[i][N_d-5] = 0.2; } for (j=N_d-4; j<N_d; j++) for (i=0; i<13; i++) { x[i][j] = x[i][j-1] + 2*Rad*sinf(t[i][j-1]); y[i][j] = y[i][j-1] + 2*Rad*cosf(t[i][j-1]); t[i][j] = t[i][j-1]; } } void rnd_coords(float x[][N_d], float y[][N_d], float t[][N_d]) { int i,j; srand (time(NULL)); for (i=0; i<13; i++){ for (j=0; j<N_d; j++){ x[i][j]+= (rand()%100)*1e-5; y[i][j]-= (rand()%100)*1e-5; t[i][j]+= (rand()%100)*1e-5; } } } void cpy_coords2(float x[][N_d], float y[][N_d], float t[][N_d],float x2[][N_d+3], float y2[][N_d+3], float t2[][N_d+3]) { int i,j; for (i=0; i<13; i++){ for (j=0; j<N_d; j++){ x2[i][j]= x[i][j]; 
y2[i][j]= y[i][j]; t2[i][j]= t[i][j]; } } } bool compare2(const float refData[][N_d], const float data[][N_d+3], float* err, float* delta, const float epsilon = 1e-6f) { float error = 0.0f; float ref = 0.0f; for(int i = 0; i<13; i++) for(int j = 0; j<N_d; j++) { float diff = refData[i][j] - data[i][j]; if(fabs(diff)>*delta){*delta=fabs(diff);} error += diff * diff; ref += refData[i][j] * refData[i][j]; } float normRef =::sqrtf((float) ref); if (::fabs((float) ref) < 1e-7f) { return false; } float normError = ::sqrtf((float) error); error = normError / normRef; if(error>*err)*err=error; return error < epsilon; }
69b3223160e334ac23dfeaf90a5051cd0648ed11.cu
#include <stdio.h> #include <cuda_runtime.h> #include "mt_cpu.h" #include "Timer.h" namespace boom { #define N_d 12 #define viscPF (4.3e-7f*2f) //(1.1e-6 ) // (kcal/mol)*(s/nm^2) #define viscPF_teta (2.3e-6f*2f) //(1.7e-5 ) // (kcal/mol)*s #define B_Koeff 174.0f //kcal/mol #define B_Koeff 91.25 //kcal/mol #define dt (2e-10f) // s #define dt_viscPF_teta 4.34782596e-5f // dt_viscPF = dt/viscPF; #define dt_viscPF 0.000232558144f // dt_viscPF_teta = dt/viscPF_teta; #define R_MT 8.128f #define A_Koeff 53.0f #define b_lat 12.9f #define A_long_D 90.0f #define b_long_D 7.9f #define A_long_T 90.0f #define b_long_T 7.9f #define ro0 0.12f #define ro0_long 0.12f #define inv_ro0_long 8.3333f // 1/ro0_long #define c_lat 0.10f #define d_lat 0.25f #define C_Koeff 300.0f #define Rad 2.0f #define inv_ro0 8.3333f // = 1 / ro0; #define clat_dlat_ro0 6.6666f // //clat_dlat_ro0 = 2*c_lat / (d_lat* ro0); #define clong_dlong_ro0 6.6666f //clong_dlong_ro0 = 2*c_lat / (d_lat* ro0); #define d_lat_ro0 33.3333f // d_lat_ro0 = 1 / (d_lat*ro0); #define d_long_ro0 33.3333f // d_long_ro0 = 1 / (d_lat*ro0_long); #define pi 3.141592653f #define fi_r 1.3291395f #define psi_r 1.801785f #define fi_l 1.0856f #define psi_l -1.339725f #define rad_mon 2.0f #define teta0_D 0.2f //rad #define teta0_T 0.0f #define bit char __constant__ float Ax_1[13] = {-0.165214628f, 0.0561592989f, 0.264667839f, 0.412544012f, 0.465911359f, 0.412544012f, 0.264667839f, 0.0561594106f, -0.165214419f, -0.348739684f, -0.452372819f, -0.452372819f, -0.348739684f}; __constant__ float Ax_2[13] = {1.76747036f, 1.87652779f, 1.5556947f, 0.878470898f, 0.0f, -0.878470898f, -1.5556947f, -1.87652767f, -1.76747072f, -1.25350749f, -0.452380866f, 0.452380747f, 1.25350738f}; __constant__ float Ax_3[13] = {0.162366703f, -0.0551912338f, -0.26010555f, -0.405432671f, -0.45788008f, -0.405432671f, -0.26010555f, -0.0551913455f, 0.162366495f, 0.342728168f, 0.444574922f, 0.444574922f, 0.342728198f}; __constant__ float A_Bx_4[13] = {0.0f, 0.46472317f, 
0.822983861f, 0.992708862f, 0.935016215f, 0.663122654f, 0.239315659f, -0.239315659f, -0.663122654f, -0.935016215f, -0.992708862f, -0.822983861f, -0.46472317f}; __constant__ float Ay_1[13] = {0.435634613f, 0.462514341f, 0.383437514f, 0.216519803f, 0.0f, -0.216519803f, -0.383437514f, -0.462514341f, -0.435634702f, -0.308956355f, -0.111499891f, 0.111499861f, 0.308956355f}; __constant__ float Ay_2[13] = {-0.428125232f, -0.454541624f, -0.376827925f, -0.212787479f, -0.0f, 0.212787479f, 0.376827925f, 0.454541624f, 0.428125322f, 0.30363065f, 0.109577879f, -0.10957785f, -0.30363062f}; __constant__ float Ay_3[13] = {-0.670314014f, 0.227851257f, 1.07381856f, 1.67378652f, 1.89031017f, 1.67378652f, 1.07381856f, 0.227851719f, -0.67031312f, -1.41491747f, -1.83538115f, -1.83538127f, -1.41491759f}; __constant__ float A_By_4[13] = {1.0f, 0.885456026f, 0.568064749f, 0.120536678f, -0.3546049f, -0.748510778f, -0.970941842f, -0.970941842f, -0.748510778f, -0.3546049f, 0.120536678f, 0.568064749f, 0.885456026f}; __constant__ float Az_1 = 0.465911359f; __constant__ float Az_2 = -0.45788008f; __constant__ float Bx_1[13] = {0.321971923f, -0.109443799f, -0.515787303f, -0.80396992f, -0.907972693f, -0.80396992f, -0.515787303f, -0.109444022f, 0.321971506f, 0.679627359f, 0.881588638f, 0.881588697f, 0.679627359f}; __constant__ float Bx_2[13] = {-1.61023343f, -1.70958889f, -1.41729772f, -0.800320745f, -0.0f, 0.800320745f, 1.41729772f, 1.70958877f, 1.61023378f, 1.14199352f, 0.412136346f, -0.412136227f, -1.1419934f}; __constant__ float Bx_3[13] = {-0.16242376f, 0.0552106313f, 0.260196954f, 0.405575156f, 0.458040982f, 0.405575156f, 0.260196954f, 0.0552107431f, -0.162423551f, -0.342848599f, -0.444731146f, -0.444731146f, -0.342848629f}; __constant__ float By_1[13] = {-0.848969102f, -0.901352584f, -0.747246861f, -0.421955943f, -0.0f, 0.421955943f, 0.747246861f, 0.901352525f, 0.848969221f, 0.602097273f, 0.2172921f, -0.217292041f, -0.602097213f}; __constant__ float By_2[13] = {0.428275675f, 0.454701364f, 
0.376960337f, 0.212862253f, 0.0f, -0.212862253f, -0.376960337f, -0.454701334f, -0.428275764f, -0.303737342f, -0.109616384f, 0.109616362f, 0.303737313f}; __constant__ float By_3[13] = {0.610681832f, -0.207581252f, -0.978290021f, -1.52488387f, -1.7221452f, -1.52488387f, -0.978290021f, -0.207581669f, 0.610681057f, 1.28904426f, 1.67210281f, 1.67210281f, 1.28904426f}; __constant__ float Bz_1 = -0.907972693f; __constant__ float Bz_2 = 0.458040982f; __device__ __forceinline__ void calc_grad_c( int i1, // i index правой молекулы int j1, // j index правой молекулы int i2, // i index левой молекулы bit type, // dimer type: 0 - 'D', 1 - 'T' bit pos, // monomer position in dimer: 0 - bottom, 1 - top float x_1, // правая молекула mol1 float y_1, float teta_1, float x_2, // левая молекула mol2 float y_2, float teta_2, float x_3, // верхняя молекула mol3 float y_3, float teta_3, float *grad_lat_x_1, // left component of mol1 float *grad_lat_y_1, float *grad_lat_teta_1, float *grad_lat_x_2, // right component of mol2 float *grad_lat_y_2, float *grad_lat_teta_2, float *grad_long_x_1, // up component of mol1 float *grad_long_y_1, float *grad_long_teta_1, float *grad_long_x_3, // down component of mol3 float *grad_long_y_3, float *grad_long_teta_3 ) { // теперь PE_left - это индекс i2, а PF_right - индекс i1 float cos_t_A = cosf(teta_2); float sin_t_A = sinf(teta_2); float cos_t_B = cosf(teta_1); float sin_t_B = sinf(teta_1); float cos_t_1 = cos_t_B; float sin_t_1 = sin_t_B; float cos_t_3 = cosf(teta_3); float sin_t_3 = sinf(teta_3); // swap i1 <=> i2 float Ax_left = Ax_1[i2]*cos_t_A + Ax_3[i2]*sin_t_A - Ax_2[i2] + (x_2 + R_MT) * A_Bx_4[i2]; float Ay_left = Ay_1[i2]*cos_t_A + Ay_2[i2]*sin_t_A + Ay_3[i2] + (x_2 + R_MT) * A_By_4[i2]; float Az_left = -Az_1*sin_t_A + Az_2*cos_t_A + y_2; float Bx_right = Bx_1[i1]*cos_t_B + Bx_3[i1]*sin_t_B - Bx_2[i1] + (x_1 + R_MT) * A_Bx_4[i1]; float By_right = By_1[i1]*cos_t_B + By_2[i1]*sin_t_B + By_3[i1] + (x_1 + R_MT) * A_By_4[i1]; float Bz_right = 
-Bz_1*sin_t_B + Bz_2*cos_t_B + y_1; float Dx = Ax_left - Bx_right; float Dy = Ay_left - By_right; float Dz = Az_left - Bz_right; float dist = sqrtf(( pow(Dx, 2) + pow(Dy, 2) + pow(Dz, 2) )); if (dist <=1e-7 ){ dist = 1e-5; } float inv_dist = 1/dist; float drdAx = Dx * inv_dist; float drdAy = Dy * inv_dist; float drdAz = Dz * inv_dist; float drdBx = -drdAx; float drdBy = -drdAy; float drdBz = -drdAz; float dA_X_dteta = -sin_t_A*Ax_1[i2] + cos_t_A*Ax_3[i2]; float dA_Y_dteta = -sin_t_A*Ay_1[i2] + cos_t_A*Ay_2[i2]; float dA_Z_dteta = -cos_t_A*Az_1 - sin_t_A*Az_2; float drdx_A = drdAx*A_Bx_4[i2] + drdAy*A_By_4[i2]; float drdy_A = drdAz; float drdteta_A = drdAx*dA_X_dteta + drdAy*dA_Y_dteta + drdAz*dA_Z_dteta; //================================================ float dB_X_dteta = -sin_t_B*Bx_1[i1] + cos_t_B*Bx_3[i1]; float dB_Y_dteta = -sin_t_B*By_1[i1] + cos_t_B*By_2[i1]; float dB_Z_dteta = -cos_t_B*Bz_1 - sin_t_B*Bz_2; float drdx_B = drdBx*A_Bx_4[i1] + drdBy*A_By_4[i1]; float drdy_B = drdBz; float drdteta_B = drdBx*dB_X_dteta + drdBy*dB_Y_dteta + drdBz*dB_Z_dteta; float Grad_U_tmp = (b_lat* dist *expf(-dist*inv_ro0)*(2.0f - dist*inv_ro0) + dist* clat_dlat_ro0 * expf( - (dist*dist) * d_lat_ro0 ) ) * A_Koeff; if ((i1==12)&&(j1>=(N_d-3))) { *grad_lat_x_2 = 0.0f; *grad_lat_y_2 = 0.0f; *grad_lat_teta_2 = 0.0f; *grad_lat_x_1 = 0.0f; *grad_lat_y_1 = 0.0f; *grad_lat_teta_1 = 0.0f; } else { *grad_lat_x_2 = Grad_U_tmp * drdx_A; *grad_lat_y_2 = Grad_U_tmp * drdy_A; *grad_lat_teta_2 = Grad_U_tmp * drdteta_A; *grad_lat_x_1 = Grad_U_tmp * drdx_B; *grad_lat_y_1 = Grad_U_tmp * drdy_B; *grad_lat_teta_1 = Grad_U_tmp * drdteta_B; } // [nd] - mol3 // [nd-1] - mol1 // longitudinal gradient float r_long_x = (x_3 - x_1) - Rad*(sin_t_1 + sin_t_3); float r_long_y = (y_3 - y_1) - Rad*(cos_t_1 + cos_t_3); float r_long = sqrtf( r_long_x*r_long_x + r_long_y*r_long_y); if (r_long <=1e-15 ){ r_long = 1e-7; } float drdx_long = - r_long_x/r_long; float drdy_long = - r_long_y/r_long; float dUdr_C; if 
(pos==0) { // bottom monomer (interaction inside dimer) dUdr_C = C_Koeff*r_long; } else { // top monomer (interaction with upper dimer) float tmp1 = r_long * expf(-r_long*inv_ro0_long)*(2 - r_long*inv_ro0_long); float tmp2 = r_long * clong_dlong_ro0 * expf(-(r_long*r_long) * d_long_ro0 ); if (type==0) // dimer type 'D' dUdr_C = (tmp1*b_long_D + tmp2) * A_long_D; else // dimer type 'T' dUdr_C = (tmp1*b_long_T + tmp2) * A_long_T; } float Grad_tmp_x = drdx_long * dUdr_C; float Grad_tmp_y = drdy_long * dUdr_C; float GradU_C_teta_1 = -dUdr_C*( drdx_long*(-Rad*cos_t_1) + drdy_long*(Rad*sin_t_1)); float GradU_C_teta_3 = dUdr_C*(-drdx_long*(-Rad*cos_t_3) - drdy_long*(Rad*sin_t_3)); float Grad_tmp; if (type==0) // dimer type 'D' Grad_tmp = B_Koeff*(teta_3 - teta_1 - teta0_D); else // dimer type 'T' Grad_tmp = B_Koeff*(teta_3 - teta_1 - teta0_T); // поменял тут знак - все заработало! float GradU_B_teta_1 = - Grad_tmp; float GradU_B_teta_3 = + Grad_tmp; if (j1 == (N_d-1)) { *grad_long_x_1 = 0.0f; *grad_long_y_1 = 0.0f; *grad_long_teta_1 = 0.0f; *grad_long_x_3 = 0.0f; *grad_long_y_3 = 0.0f; *grad_long_teta_3 = 0.0f; } else { *grad_long_x_1 = Grad_tmp_x; *grad_long_y_1 = Grad_tmp_y; *grad_long_teta_1 = GradU_C_teta_1 + GradU_B_teta_1; *grad_long_x_3 = - Grad_tmp_x; *grad_long_y_3 = - Grad_tmp_y; *grad_long_teta_3 = GradU_C_teta_3 + GradU_B_teta_3; } } __global__ void mt_cuda_kernel1( const int niters, float* x_inout, float* y_inout, float* t_inout ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int s; if(i==0 && j==0){ printf("niters %d\n", niters); } __shared__ float __x[13][N_d+3]; __shared__ float __y[13][N_d+3]; __shared__ float __t[13][N_d+3]; __shared__ float4 __lat_r[13][N_d+3]; __shared__ float4 __long_d[13][N_d+1]; //__lat_r[i][j]=0; //__long_d[i][j]=0; __x[i][j]=x_inout[i*(N_d+3) + j]; __y[i][j]=y_inout[i*(N_d+3) + j]; __t[i][j]=t_inout[i*(N_d+3) + j]; float f_x, f_y, f_t; bit pos = 0; bit type = 0; float x_ij, y_ij, 
t_ij, x_i2j2,y_i2j2,t_i2j2,x_ij_1, y_ij_1, t_ij_1; float lat_l_x, lat_l_y, lat_l_t, lat_r_x, lat_r_y, lat_r_t, long_u_x, long_u_y, long_u_t, long_d_x, long_d_y, long_d_t; int i2 = (i==12)? 0 : (i+1); int j2 = (i==12)? (j+3) : j; pos =(j % 2); __syncthreads(); //#pragma unroll 2 for (s=0;s<niters;s++){ x_ij= __x[i][j]; y_ij=__y[i][j]; t_ij=__t[i][j]; x_i2j2=__x[i2][j2]; y_i2j2=__y[i2][j2]; t_i2j2=__t[i2][j2]; x_ij_1=__x[i][j+1]; y_ij_1=__y[i][j+1], t_ij_1=__t[i][j+1]; calc_grad_c(i, j, i2, type, pos, x_ij, y_ij, t_ij, x_i2j2,y_i2j2,t_i2j2, x_ij_1, y_ij_1, t_ij_1, &lat_l_x,&lat_l_y,&lat_l_t, &lat_r_x,&lat_r_y,&lat_r_t, &long_u_x, &long_u_y, &long_u_t, &long_d_x, &long_d_y, &long_d_t); __lat_r[i2][j2].x=lat_r_x; __lat_r[i2][j2].y=lat_r_y; __lat_r[i2][j2].z=lat_r_t; __long_d[i][j+1].x=long_d_x;__long_d[i][j+1].y=long_d_y;__long_d[i][j+1].z=long_d_t; __syncthreads(); if (j!=0){ f_x = lat_l_x + __lat_r[i][j].x + long_u_x + __long_d[i][j].x; f_y = lat_l_y + __lat_r[i][j].y + long_u_y + __long_d[i][j].y; f_t = lat_l_t + __lat_r[i][j].z + long_u_t + __long_d[i][j].z; x_ij -= dt_viscPF * f_x; y_ij -= dt_viscPF * f_y; t_ij -= dt_viscPF_teta * f_t; __x[i][j]=x_ij; __y[i][j]=y_ij; __t[i][j]=t_ij; } // if j __syncthreads(); } // for s if (j!=0){ x_inout[i*(N_d+3) + j] = __x[i][j]; y_inout[i*(N_d+3) + j] = __y[i][j]; t_inout[i*(N_d+3) + j] = __t[i][j]; } } /* __global__ void mt_cuda_kernel_root( const int niters, float* x_inout, float* y_inout, float* t_inout ) { dim3 block_size; block_size.x=13; block_size.y=N_d; dim3 grid_size; grid_size.x = 1; grid_size.y = 1; boom::mt_cuda_kernel1<<<grid_size, block_size>>>(niters, x_inout, y_inout, t_inout); } */ } // ns boom void init_coords(float x[][N_d], float y[][N_d], float t[][N_d]); void rnd_coords(float x[][N_d], float y[][N_d], float t[][N_d]); void cpy_coords2(float x[][N_d], float y[][N_d], float t[][N_d],float x2[][N_d+3], float y2[][N_d+3], float t2[][N_d+3]); bool compare2(const float refData[][N_d], const float 
data[][N_d+3],float* err, float* delta, const float epsilon); float x_1[13][N_d]; float y_1[13][N_d]; float t_1[13][N_d]; float x_3[13][N_d+3]; float y_3[13][N_d+3]; float t_3[13][N_d+3]; int main(void) { cudaError_t err = cudaSuccess; CPerfCounter t_cu; int use_cmp=0; float err1=0.0f; float delta1=0.0f; float epsilon = 1e-2f; int niters=100000; size_t size = 13*(N_d+3)*sizeof(float); /* for (int i = 0; i < 13; ++i) for (int j = 0; j < N_d; ++j) { x_3[i][j] = 7;//rand()/(float)RAND_MAX; y_3[i][j] = 9;//rand()/(float)RAND_MAX; t_3[i][j] = 13;//rand()/(float)RAND_MAX; } */ init_coords(x_1,y_1,t_1); rnd_coords(x_1,y_1,t_1); cpy_coords2(x_1,y_1,t_1,x_3,y_3,t_3); // Allocate the device vector float *d_x = NULL; err = cudaMalloc((void **)&d_x, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector x (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device vector float *d_y = NULL; err = cudaMalloc((void **)&d_y, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector y (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device vector float *d_t = NULL; err = cudaMalloc((void **)&d_t, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector t (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } t_cu.Reset();t_cu.Start(); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_x, x_3, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector x from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_y, y_3, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector y from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } 
err = cudaMemcpy(d_t, t_3, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector t from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Kernel dim3 block_size; block_size.x=13; block_size.y=N_d; dim3 grid_size; grid_size.x = 1;//num_elements_x / block_size.x; grid_size.y = 1;//num_elements_y / block_size.y; printf("CUDA kernel launch with %d,%d %d,%d\n", block_size.x, block_size.y, grid_size.x,grid_size.y); boom::mt_cuda_kernel1<<<grid_size, block_size>>>(niters, d_x, d_y, d_t); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. // printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(x_3, d_x, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector x from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(y_3, d_y, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector y from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(t_3, d_t, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector t from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } t_cu.Stop(); printf("niters %d t_cu %lf\n",niters,t_cu.GetElapsedTime()); // printf("%f,%f,%f\n",x_3[0][1],y_3[0][1],t_3[0][1]); if(use_cmp){ mt_cpu(niters,1,x_1,y_1,t_1,x_1,y_1,t_1); if(!compare2(x_1, x_3, &err1, &delta1, epsilon) || !compare2(y_1, y_3, &err1, &delta1, epsilon) || !compare2(t_1, t_3, &err1, &delta1, epsilon)) { printf("Compare cu results failed (%f,%f)\n",err1,delta1); } else printf("Test OK(%f,%f)\n",err1,delta1); } // Free 
device global memory err = cudaFree(d_x); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_y); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_t); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory //free(h_A); //free(h_B); //free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! 
error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; } void init_coords(float x[][N_d], float y[][N_d], float t[][N_d]) { int i,j; // задание y координаты для нижней спирали for (i=0; i<13; i++) y[i][0] = 2.0f*6/13*(i+1); // задание y координат для остальных молекул до половины высоты трубочки for (j=1; j<N_d-4; j++) for (i=0; i<13; i++) y[i][j] = y[i][j-1] + 2.0f*Rad; // задание x и teta координат так чтобы был цилиндр до половины высоты трубочки for (j=0; j<N_d-5; j++) for (i=0; i<13; i++) { x[i][j] = 0.0; t[i][j] = 0.0; } // for (i=0; i<13; i++) { x[i][N_d-5] = 0.6; t[i][N_d-5] = 0.2; } for (j=N_d-4; j<N_d; j++) for (i=0; i<13; i++) { x[i][j] = x[i][j-1] + 2*Rad*sinf(t[i][j-1]); y[i][j] = y[i][j-1] + 2*Rad*cosf(t[i][j-1]); t[i][j] = t[i][j-1]; } } void rnd_coords(float x[][N_d], float y[][N_d], float t[][N_d]) { int i,j; srand (time(NULL)); for (i=0; i<13; i++){ for (j=0; j<N_d; j++){ x[i][j]+= (rand()%100)*1e-5; y[i][j]-= (rand()%100)*1e-5; t[i][j]+= (rand()%100)*1e-5; } } } void cpy_coords2(float x[][N_d], float y[][N_d], float t[][N_d],float x2[][N_d+3], float y2[][N_d+3], float t2[][N_d+3]) { int i,j; for (i=0; i<13; i++){ for (j=0; j<N_d; j++){ x2[i][j]= x[i][j]; y2[i][j]= y[i][j]; t2[i][j]= t[i][j]; } } } bool compare2(const float refData[][N_d], const float data[][N_d+3], float* err, float* delta, const float epsilon = 1e-6f) { float error = 0.0f; float ref = 0.0f; for(int i = 0; i<13; i++) for(int j = 0; j<N_d; j++) { float diff = refData[i][j] - data[i][j]; if(fabs(diff)>*delta){*delta=fabs(diff);} error += diff * diff; ref += refData[i][j] * refData[i][j]; } float normRef =::sqrtf((float) ref); if (::fabs((float) ref) < 1e-7f) { return false; } float normError = ::sqrtf((float) error); error = normError / normRef; if(error>*err)*err=error; return error < epsilon; }
6661c069ef1041dbc29a8dfd902705e64e5629c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory Iowa State University and The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: joaander #include "Enforce2DUpdaterGPU_hip.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif #include <stdio.h> /*! \file Enforce2DUpdaterGPU.cu \brief Defines GPU kernel code for constraining systems to a 2D plane on the GPU. Used by Enforce2DUpdaterGPU. */ //! Constrains partcles to the xy plane on the GPU /*! \param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ extern "C" __global__ void gpu_enforce2d_kernel(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; Scalar3 accel = d_accel[idx]; // zero the z-velocity and z-acceleration(FLOPS: ?) vel.z = Scalar(0.0); accel.z = Scalar(0.0); // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; d_accel[idx] = accel; } } /*! 
\param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ hipError_t gpu_enforce2d(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (N/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_enforce2d_kernel), dim3(grid), dim3(threads) , 0, 0, N, d_vel, d_accel); return hipSuccess; }
6661c069ef1041dbc29a8dfd902705e64e5629c6.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory Iowa State University and The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: joaander #include "Enforce2DUpdaterGPU.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif #include <stdio.h> /*! \file Enforce2DUpdaterGPU.cu \brief Defines GPU kernel code for constraining systems to a 2D plane on the GPU. Used by Enforce2DUpdaterGPU. */ //! Constrains partcles to the xy plane on the GPU /*! \param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ extern "C" __global__ void gpu_enforce2d_kernel(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; Scalar3 accel = d_accel[idx]; // zero the z-velocity and z-acceleration(FLOPS: ?) vel.z = Scalar(0.0); accel.z = Scalar(0.0); // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; d_accel[idx] = accel; } } /*! 
\param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ cudaError_t gpu_enforce2d(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (N/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_enforce2d_kernel<<< grid, threads >>>(N, d_vel, d_accel); return cudaSuccess; }
b444dd1d89493de1abc588b7bf5fb545b1178952.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaTools.h" #include "Indice2D.h" #include "ColorToolCuda.h" #include "CalibreurCudas.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit); __global__ void kernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit); __global__ void kernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ static void setPixel(uchar4& pixel, float valueOriginal); __device__ static void ecrasement(float* ptrDevImageHeater, float* ptrDevImage, int tid); __device__ static void diffusion(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int tid); __device__ static void D(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int NB_THREAD, int n); __device__ static void E(float* ptrDevImageHeater, float* ptrDevImage, int w, int h, int NB_THREAD, int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ void launchKernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); hipLaunchKernelGGL(( 
kernelFillImageHeatTransfert), dim3(dg),dim3(db), 0, 0, w, h, ptrDevImageA, ptrDevImageB, ptrDevImageHeaters, ptrDevImageInit); int a = 0; } void launchKernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); hipLaunchKernelGGL(( kernelInitImageHeatTransfert), dim3(dg),dim3(db), 0, 0, w, h, ptrDevImageA, ptrDevImageB, ptrDevImageHeaters, ptrDevImageInit); int a = 0; } void launchKernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); hipLaunchKernelGGL(( kernelRenderImageHeatTransfert), dim3(dg),dim3(db), 0, 0, ptrDevImageGL, w, h, calibreur, ptrDevImageToRender); int a = 0; } __global__ void kernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); //ImageB = Diffusion(ImageA) D(ptrDevImageA, ptrDevImageB, w, h, NB_THREAD, n); //ImageB = Ecrasement(ImageHeaters, ImageB) E(ptrDevImageHeaters, ptrDevImageB, w, h, NB_THREAD, n); //ImageA = Diffusion(ImageB) D(ptrDevImageB, ptrDevImageA, w, h, NB_THREAD, n); //ImageA = Ecrasement(ImageHeaters, ImageA) E(ptrDevImageHeaters, ptrDevImageA, w, h, NB_THREAD, n); } __global__ void kernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); //ImageInit = Ecrasement(ImageHeaters, ImageInit) E(ptrDevImageHeaters, ptrDevImageInit, w, h, NB_THREAD, n); //ImageA = Diffusion(ImageInit) D(ptrDevImageInit, ptrDevImageA, w, h, NB_THREAD, n); //ImageA = Ecrasement(ImageHeaters, ImageA) E(ptrDevImageHeaters, ptrDevImageA, w, h, NB_THREAD, n); } __global__ void 
kernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); //Render ImageToRender while (s < n) { float hue = calibreur.calibrate(ptrDevImageToRender[s]); setPixel(ptrDevImageGL[s], hue); s += NB_THREAD; } __syncthreads(); } __device__ static void setPixel(uchar4& pixelIJ, float hue) { float h = hue; float s = 1.0; float b = 1.0; ColorToolCuda::HSB_TO_RVB(h, s, b, pixelIJ.x, pixelIJ.y, pixelIJ.z); pixelIJ.w = 255; } __device__ static void ecrasement(float* ptrDevImageHeater, float* ptrDevImage, int tid) { float valueHeater = ptrDevImageHeater[tid]; if (valueHeater != 0.0) { ptrDevImage[tid] = valueHeater; } } __device__ static void diffusion(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int tid) { int size = w * h; float k = 0.25; int i; int j; float newValue = 0.0; float oldValue = ptrDevImageResult[tid]; if (tid < size) { Indice2D::pixelIJ(tid, w, i, j); if ((i >= 3 && i < w-3) && (j >= 3 && j < h-3)) { float westValue = ptrDevImageDiffusion[tid - 1]; float eastValue = ptrDevImageDiffusion[tid + 1]; float northValue = ptrDevImageDiffusion[tid - w]; float southValue = ptrDevImageDiffusion[tid + w]; newValue = oldValue + k * (westValue + eastValue + northValue + southValue - (4 * oldValue)); } else { newValue = oldValue; } ptrDevImageResult[tid] = newValue; } } __device__ static void D(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int NB_THREAD, int n) { int s = Indice2D::tid(); //ptrDevImageResult = Diffusion(ptrDevImageDiffusion) while (s < n) { diffusion(ptrDevImageDiffusion, ptrDevImageResult, w, h, s); s += NB_THREAD; } __syncthreads(); } __device__ static void E(float* ptrDevImageHeaters, float* ptrDevImage, int w, int h, int NB_THREAD, int n) { int s = Indice2D::tid(); //ptrDevImage = Ecrasement(ptrDevImageHeaters, ptrDevImage) while (s < n) { 
ecrasement(ptrDevImageHeaters, ptrDevImage, s); s += NB_THREAD; } __syncthreads(); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
b444dd1d89493de1abc588b7bf5fb545b1178952.cu
#include "cudaTools.h" #include "Indice2D.h" #include "ColorToolCuda.h" #include "CalibreurCudas.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit); __global__ void kernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit); __global__ void kernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ static void setPixel(uchar4& pixel, float valueOriginal); __device__ static void ecrasement(float* ptrDevImageHeater, float* ptrDevImage, int tid); __device__ static void diffusion(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int tid); __device__ static void D(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int NB_THREAD, int n); __device__ static void E(float* ptrDevImageHeater, float* ptrDevImage, int w, int h, int NB_THREAD, int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ void launchKernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); kernelFillImageHeatTransfert<<<dg,db>>>(w, h, ptrDevImageA, ptrDevImageB, ptrDevImageHeaters, ptrDevImageInit); int a = 0; } 
void launchKernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); kernelInitImageHeatTransfert<<<dg,db>>>(w, h, ptrDevImageA, ptrDevImageB, ptrDevImageHeaters, ptrDevImageInit); int a = 0; } void launchKernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); kernelRenderImageHeatTransfert<<<dg,db>>>(ptrDevImageGL, w, h, calibreur, ptrDevImageToRender); int a = 0; } __global__ void kernelFillImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); //ImageB = Diffusion(ImageA) D(ptrDevImageA, ptrDevImageB, w, h, NB_THREAD, n); //ImageB = Ecrasement(ImageHeaters, ImageB) E(ptrDevImageHeaters, ptrDevImageB, w, h, NB_THREAD, n); //ImageA = Diffusion(ImageB) D(ptrDevImageB, ptrDevImageA, w, h, NB_THREAD, n); //ImageA = Ecrasement(ImageHeaters, ImageA) E(ptrDevImageHeaters, ptrDevImageA, w, h, NB_THREAD, n); } __global__ void kernelInitImageHeatTransfert(int w, int h, float* ptrDevImageA, float* ptrDevImageB, float* ptrDevImageHeaters, float* ptrDevImageInit) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); //ImageInit = Ecrasement(ImageHeaters, ImageInit) E(ptrDevImageHeaters, ptrDevImageInit, w, h, NB_THREAD, n); //ImageA = Diffusion(ImageInit) D(ptrDevImageInit, ptrDevImageA, w, h, NB_THREAD, n); //ImageA = Ecrasement(ImageHeaters, ImageA) E(ptrDevImageHeaters, ptrDevImageA, w, h, NB_THREAD, n); } __global__ void kernelRenderImageHeatTransfert(uchar4* ptrDevImageGL, int w, int h, CalibreurCudas calibreur, float* ptrDevImageToRender) { const int n = w * h; const int NB_THREAD = Indice2D::nbThread(); int s = Indice2D::tid(); 
//Render ImageToRender while (s < n) { float hue = calibreur.calibrate(ptrDevImageToRender[s]); setPixel(ptrDevImageGL[s], hue); s += NB_THREAD; } __syncthreads(); } __device__ static void setPixel(uchar4& pixelIJ, float hue) { float h = hue; float s = 1.0; float b = 1.0; ColorToolCuda::HSB_TO_RVB(h, s, b, pixelIJ.x, pixelIJ.y, pixelIJ.z); pixelIJ.w = 255; } __device__ static void ecrasement(float* ptrDevImageHeater, float* ptrDevImage, int tid) { float valueHeater = ptrDevImageHeater[tid]; if (valueHeater != 0.0) { ptrDevImage[tid] = valueHeater; } } __device__ static void diffusion(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int tid) { int size = w * h; float k = 0.25; int i; int j; float newValue = 0.0; float oldValue = ptrDevImageResult[tid]; if (tid < size) { Indice2D::pixelIJ(tid, w, i, j); if ((i >= 3 && i < w-3) && (j >= 3 && j < h-3)) { float westValue = ptrDevImageDiffusion[tid - 1]; float eastValue = ptrDevImageDiffusion[tid + 1]; float northValue = ptrDevImageDiffusion[tid - w]; float southValue = ptrDevImageDiffusion[tid + w]; newValue = oldValue + k * (westValue + eastValue + northValue + southValue - (4 * oldValue)); } else { newValue = oldValue; } ptrDevImageResult[tid] = newValue; } } __device__ static void D(float* ptrDevImageDiffusion, float* ptrDevImageResult, int w, int h, int NB_THREAD, int n) { int s = Indice2D::tid(); //ptrDevImageResult = Diffusion(ptrDevImageDiffusion) while (s < n) { diffusion(ptrDevImageDiffusion, ptrDevImageResult, w, h, s); s += NB_THREAD; } __syncthreads(); } __device__ static void E(float* ptrDevImageHeaters, float* ptrDevImage, int w, int h, int NB_THREAD, int n) { int s = Indice2D::tid(); //ptrDevImage = Ecrasement(ptrDevImageHeaters, ptrDevImage) while (s < n) { ecrasement(ptrDevImageHeaters, ptrDevImage, s); s += NB_THREAD; } __syncthreads(); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ 
/*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
a20206b62a806ed4add5dc23876914042899c5e5.hip
// !!! This is a file automatically generated by hipify!!! #include "header/cuda_usage.h" #include "header/chain.h" #include "header/iterator.h" #include "header/printing.h" #include "header/cluster_probability.h" #include "header/multinomial.h" #include "header/distribution.h" #include "header/beta_hat.h" #include "header/wrap_R.h" #include "header/summary2.h" #include "header/cholesky.h" #include "header/construct_prec.h" #include "header/multi_dot_product.h" #include "header/gibbs.h" #include "header/running_mean.h" #include <R.h> #include <Rinternals.h> #include <Rmath.h> // This prevents the replacement of "beta" by Rmath.h #ifdef beta #undef beta #endif #include <hip/hip_runtime.h> extern "C" SEXP Rdata_init(SEXP ytyR, SEXP xtyR, SEXP xtxR, SEXP G, SEXP V, SEXP N){ int g = INTEGER(G)[0], v = INTEGER(V)[0], n = INTEGER(N)[0]; fvec_h xtx(v*v, 1.0); double *ytyp = REAL(ytyR); double *xtyp = REAL(xtyR); double *xtxp = &(xtx[0]); data_t data(ytyp, xtyp, xtxp, g, v, n, false); printVec(data.xtx, v, v); printVec(data.xty, v, g); printVec(data.ytx, g, v); SEXP zero = PROTECT(allocVector(INTSXP, 1)); INTEGER(zero)[0] = 0; UNPROTECT(1); return zero; } extern "C" SEXP Rcluster_weights(SEXP Rdata, SEXP Rchain, SEXP Rpriors, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0]; data_t data = Rdata_wrap(Rdata, verbose); chain_t chain = Rchain_wrap(Rchain, verbose); priors_t priors = Rpriors_wrap(Rpriors, verbose); fvec_d grid(data.G*priors.K); cluster_weights_no_voom(grid, data, chain, 0); fvec_h grid_h(data.G*priors.K); thrust::copy(grid.begin(), grid.end(), grid_h.begin()); SEXP OUT = PROTECT(allocVector(REALSXP, data.G*priors.K)); for(int i=0; i<data.G*priors.K; ++i) REAL(OUT)[i] = grid_h[i]; UNPROTECT(1); return OUT; } extern "C" SEXP Rnormalize_wts(SEXP grid, SEXP dim1, SEXP dim2){ int k=INTEGER(dim1)[0], g = INTEGER(dim2)[0]; fvec_h grd_h(REAL(grid), REAL(grid) + k*g); fvec_d grd_d(k*g); thrust::copy(grd_h.begin(), grd_h.end(), grd_d.begin()); normalize_wts(grd_d, k, 
g); thrust::copy(grd_d.begin(), grd_d.end(), grd_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, g*k)); for(int i=0; i<k*g; ++i) REAL(out)[i] = grd_h[i]; UNPROTECT(1); return out; } extern "C" SEXP RgetUniform(SEXP Rseed, SEXP upperR){ int n = length(upperR), seed = INTEGER(Rseed)[0]; //instantiate RNGs hiprandState_t *devStates; CUDA_CALL(hipMalloc((void **) &devStates, n * sizeof(hiprandState_t))); //temporary memory fvec_h upper(REAL(upperR), REAL(upperR) + n); fvec_d upper_d(upper.begin(), upper.end()); double *upper_d_ptr = thrust::raw_pointer_cast(upper_d.data()); int block_size=512; int n_blocks = n/block_size + 1; //set up RNGs hipLaunchKernelGGL(( setup_kernel), dim3(n_blocks),dim3(block_size), 0, 0, seed, n, devStates); //sample from U(0, upper) hipLaunchKernelGGL(( getUniform), dim3(n_blocks),dim3(block_size), 0, 0, devStates, n, upper_d_ptr); thrust::copy(upper_d.begin(), upper_d.end(), upper.begin()); SEXP out = PROTECT(allocVector(REALSXP, n)); for(int i = 0; i < n; ++i) REAL(out)[i] = upper_d[i]; //clean up CUDA_CALL(hipFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rgnl_multinomial(SEXP Rseed, SEXP probs, SEXP K, SEXP G){ int k = INTEGER(K)[0], g = INTEGER(G)[0], seed = INTEGER(Rseed)[0]; //instantiate RNGs hiprandState_t *devStates; CUDA_CALL(hipMalloc((void **) &devStates, g * sizeof(hiprandState_t))); //temporary memory fvec_h probs_h(REAL(probs), REAL(probs) + g*k); fvec_d probs_d(probs_h.begin(), probs_h.end()); ivec_h zeta_h(g); ivec_d zeta_d(g); double *probs_d_ptr = thrust::raw_pointer_cast(probs_d.data()); //set up RNGs int block_size=512; int n_blocks = g/block_size + 1; hipLaunchKernelGGL(( setup_kernel), dim3(n_blocks),dim3(block_size), 0, 0, seed, g, devStates); //get multinomial draws gnl_multinomial(zeta_d, probs_d, devStates, k, g); thrust::copy(probs_d.begin(), probs_d.end(), probs_h.begin()); thrust::copy(zeta_d.begin(), zeta_d.end(), zeta_h.begin()); SEXP out = PROTECT(allocVector(VECSXP, 2)); SEXP out_z = 
PROTECT(allocVector(INTSXP, g)); SEXP out_p = PROTECT(allocVector(REALSXP, k*g)); for(int i = 0; i < g; ++i){ INTEGER(out_z)[i] = zeta_h[i]; for(int j=0; j < k; ++j){ REAL(out_p)[i*k + j] = probs_h[i*k + j]; } } SET_VECTOR_ELT(out, 0, out_z); SET_VECTOR_ELT(out, 1, out_p); //clean up CUDA_CALL(hipFree(devStates)); UNPROTECT(3); return out; } extern "C" SEXP Rbeta_hat(SEXP R_Lvec, SEXP R_xty, SEXP K, SEXP V){ int k=INTEGER(K)[0], v=INTEGER(V)[0]; fvec_h L_h(REAL(R_Lvec), REAL(R_Lvec)+k*v*v), b_h(REAL(R_xty), REAL(R_xty)+k*v); fvec_d L_d(k*v*v); fvec_d b_d(k*v); thrust::copy(L_h.begin(), L_h.end(), L_d.begin()); thrust::copy(b_h.begin(), b_h.end(), b_d.begin()); beta_hat(L_d, b_d, k, v); thrust::copy(b_d.begin(), b_d.end(), b_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, k*v)); for(int i=0; i<k*v; ++i) REAL(out)[i] = b_h[i]; UNPROTECT(1); return out; } extern "C" SEXP Rtest_data_wrap(SEXP Rdata, SEXP Rpriors, SEXP Rchain){ data_t data = Rdata_wrap(Rdata, 1); priors_t priors = Rpriors_wrap(Rpriors, 1); chain_t chain = Rchain_wrap(Rchain, 1); std::cout << "y transpose x\n"; printVec(data.ytx, data.G, data.V); std::cout << "prior location\n"; printVec(priors.mu0, priors.V, 1); std::cout << "Contrasts on location parameters"; printVec(chain.C, chain.P, chain.V); std::cout << "slice sampler width parameter: " << chain.slice_width << "\n"; SEXP out = PROTECT(allocVector(INTSXP, 1)); INTEGER(out)[0] = 0; UNPROTECT(1); return out; } //Temporarily disable while fixing draw_MVNormal /* extern"C" SEXP Rtest_MVNormal(SEXP Rseed, SEXP Rzeta, SEXP Rdata, SEXP Rpriors){ int seed = INTEGER(Rseed)[0]; data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); ivec_h zeta_h(INTEGER(Rzeta), INTEGER(Rzeta) + data.G); ivec_d zeta_d(zeta_h.begin(),zeta_h.end()); summary2 smry(priors.K, zeta_d, data); smry.print_Mk(); smry.print_yty(); smry.print_xty(); //instantiate RNGs hiprandState_t *devStates; int V = data.V, K = data.K; CUDA_CALL(hipMalloc((void **) &devStates, 
V*K*sizeof(hiprandState_t))); int block_size=(512 / (32*V)) * (32*V); int n_blocks = (K*V)/block_size + 1; setup_kernel<<<n_blocks, block_size>>>(seed, priors.K*data.V, devStates); //make precision matrices fvec_d prec(smry.num_occupied * smry.V * smry.V, 0.0); fvec_d tau2(priors.K, 1.0); void construct_prec(fvec_d &prec, data_t &data, priors_t &priors, chain_t &chain, ivec_d &Mk); construct_prec(prec, data, priors, ) construct_prec(prec.begin(), prec.end(), priors.lambda2.begin(), priors.lambda2.end(), tau2.begin(), tau2.end(), smry.Mk.begin(), smry.Mk.end(), data.xtx.begin(), data.xtx.end(), smry.num_occupied, data.V); std::cout << "prec_matrices:\n"; printVec(prec, smry.V*smry.V, smry.num_occupied); //cholesky decomposition realIter b=prec.begin(), e = prec.end(); chol_multiple(b, e, data.V, smry.num_occupied); std::cout << "chol_matrices:\n"; printVec(prec, smry.V*smry.V, smry.num_occupied); std::cout << "smry.V= " << smry.V << "\n"; std::cout << "smry.num_occupied= " << smry.num_occupied << "\n"; //conditional means std::cout << "xty_sums:\n"; thrust::device_ptr<double> xty_ptr = &smry.xty_sums[0]; thrust::copy(xty_ptr, xty_ptr + smry.num_occupied * data.V, std::ostream_iterator<double>(std::cout, " ")); fvec_d bhat(smry.num_occupied * data.V); thrust::copy(xty_ptr, xty_ptr + smry.num_occupied * data.V, bhat.begin()); hipDeviceSynchronize(); std::cout << "container for beta_hat (initialized):\n"; printVec(bhat, data.V, smry.num_occupied); beta_hat(prec, bhat, smry.num_occupied, data.V); std::cout << "beta_hat:\n"; printVec(bhat, data.V, smry.num_occupied); //draw beta int beta_size = data.V*priors.K; fvec_h beta_h(beta_size, 0.0); fvec_d beta(beta_h.begin(), beta_h.end()); draw_MVNormal(devStates, bhat, prec, beta, priors, smry, 0); //print value std::cout << "beta_draws:\n"; printVec(beta, data.V, priors.K); thrust::copy(beta.begin(), beta.end(), beta_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, beta_size)); for(int i=0; i < beta_size; ++i){ 
REAL(out)[i] = beta_h[i]; } //clean up CUDA_CALL(hipFree(devStates)); UNPROTECT(1); return out; }*/ extern "C" SEXP Rmulti_dot_prod(SEXP Rx, SEXP Ry, SEXP Rdim, SEXP Rn){ int dim = INTEGER(Rdim)[0], n = INTEGER(Rn)[0]; fvec_h x_h(REAL(Rx), REAL(Rx) + dim*n); fvec_h y_h(REAL(Ry), REAL(Ry) + dim*n); fvec_d x_d(x_h.begin(), x_h.end()); fvec_d y_d(y_h.begin(), y_h.end()); fvec_d z_d(n); multi_dot_prod(x_d, y_d, z_d, dim, n); fvec_h z_h(n); thrust::copy(z_d.begin(), z_d.end(), z_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, n)); for(int i = 0; i < n; ++i){ REAL(out)[i] = z_h[i]; } UNPROTECT(1); return out; } extern "C" SEXP RsumSqErr(SEXP Rdata, SEXP Rzeta, SEXP K, SEXP Rbeta, SEXP verbose){ int k = INTEGER(K)[0], verb = INTEGER(verbose)[0]; data_t data = Rdata_wrap(Rdata); ivec_h zeta_h(INTEGER(Rzeta), INTEGER(Rzeta) + data.G); ivec_d zeta_d(zeta_h.begin(), zeta_h.end()); // get cluster summaries summary2 smry(k, zeta_d, data); fvec_d beta(REAL(Rbeta), REAL(Rbeta) + k*data.V); fvec_d sse_d(smry.num_occupied); if(verb>0){ // debugging std::cout << "beta:\n"; printVec(beta, data.V, k); std::cout << "xty sums:\n"; printVec(smry.xty_sums, smry.V, smry.num_occupied); std::cout << "yty sums:\n"; printVec(smry.yty_sums, smry.num_occupied, verb); std::cout << "xtx sums:\n"; printVec(smry.xtx_sums, smry.V*smry.V, smry.num_occupied); } // calculate SSE for given value of beta smry.sumSqErr(sse_d, beta, verb); //transfer to host vector fvec_h sse_h(smry.num_occupied); thrust::device_ptr<double> sse_ptr = &sse_d[0]; thrust::copy(sse_ptr, sse_ptr + smry.num_occupied, sse_h.begin()); //copy to SEXP SEXP out = PROTECT(allocVector(REALSXP, smry.num_occupied)); double *Rout = REAL(out); for(int i=0; i<smry.num_occupied; ++i){ Rout[i] = sse_h[i]; } UNPROTECT(1); return out; } //extern "C" SEXP Rtest_draw_tau2(SEXP Rseed, SEXP Rdata, SEXP Rchain, SEXP Rpriors){ // int seed = INTEGER(Rseed)[0]; // data_t data = Rdata_wrap(Rdata); // chain_t chain = Rchain_wrap(Rchain); // priors_t 
priors = Rpriors_wrap(Rpriors); // summary2 smry(chain.K, chain.zeta, data); // // //instantiate RNGs // hiprandState_t *devStates; // CUDA_CALL(hipMalloc((void **) &devStates, priors.K * sizeof(hiprandState_t))); // hipLaunchKernelGGL(( setup_kernel), dim3(priors.K), dim3(1), 0, 0, seed, priors.K, devStates); // // std::cout << "tau2 before:\n"; // printVec(chain.tau2, chain.K, 1); // // draw_tau2(devStates, chain, priors, data, smry); // // std::cout << "tau2 after:\n"; // printVec(chain.tau2, chain.K, 1); // // fvec_h tau2(chain.K); // thrust::copy(chain.tau2.begin(), chain.tau2.end(), tau2.begin()); // // SEXP out = PROTECT(allocVector(REALSXP, chain.K)); // for(int i=0; i<chain.K; ++i){ // REAL(out)[i] = tau2[i]; // } // // //clean up // CUDA_CALL(hipFree(devStates)); // UNPROTECT(1); // return out; //} extern "C" SEXP Rtest_draw_pi(SEXP Rseed, SEXP Rchain, SEXP Rpriors, SEXP Rdata, SEXP RmethodPi){ int seed = INTEGER(Rseed)[0]; data_t data = Rdata_wrap(Rdata); chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); summary2 smry(chain.K, chain.zeta, data); std::cout << "Mk:\n"; printVec(smry.Mk, chain.K, 1); int methodPi = INTEGER(RmethodPi)[0]; //instantiate RNGs hiprandState_t *devStates; CUDA_CALL(hipMalloc((void **) &devStates, priors.K * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup_kernel), dim3(priors.K), dim3(1), 0, 0, seed, priors.K, devStates); if(methodPi == 0){ draw_pi(devStates, chain, priors, smry, 2); } else if(methodPi == 1) { draw_pi_SD(devStates, chain, priors, smry, 2); } SEXP out = PROTECT(allocVector(REALSXP, chain.K)); for(int i=0; i<chain.K; ++i){ REAL(out)[i] = chain.pi[i]; } //clean up CUDA_CALL(hipFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rtest_draw_zeta(SEXP Rseed, SEXP Rchain, SEXP Rpriors, SEXP Rdata){ int seed = INTEGER(Rseed)[0]; chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); data_t data = Rdata_wrap(Rdata); //instantiate RNGs hiprandState_t 
*devStates; CUDA_CALL(hipMalloc((void **) &devStates, data.G * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup_kernel), dim3(data.G), dim3(1), 0, 0, seed, data.G, devStates); draw_zeta(devStates, data, chain, priors, 0); //printVec(chain.zeta, data.G, 1); ivec_h zeta_h(data.G); thrust::copy(chain.zeta.begin(), chain.zeta.end(), zeta_h.begin()); SEXP out = PROTECT(allocVector(INTSXP, data.G)); for(int i=0; i<data.G; ++i){ INTEGER(out)[i] = zeta_h[i]; } //clean up CUDA_CALL(hipFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rtest_running_mean(SEXP Rmean, SEXP Rnew, SEXP Rpow, SEXP Rstep){ int pow = INTEGER(Rpow)[0]; int step = INTEGER(Rstep)[0]; int len = length(Rmean); fvec_h mean_h(REAL(Rmean), REAL(Rmean) + len); fvec_h new_h(REAL(Rnew), REAL(Rnew) + len); fvec_d mean_d(mean_h.begin(), mean_h.end()); fvec_d new_d(new_h.begin(), new_h.end()); update_running_means(mean_d, new_d, len, step, pow); thrust::copy(mean_d.begin(), mean_d.end(), mean_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, len)); for(int i=0; i<len; i++) REAL(out)[i] = mean_h[i]; UNPROTECT(1); return out; } extern "C" SEXP Rtest_update_means(SEXP Rchain, SEXP Rstep){ int step = INTEGER(Rstep)[0]; chain_t chain = Rchain_wrap(Rchain); int G = chain.G, V = chain.V, n_hyp = chain.n_hyp; chain.update_means(step); chain.update_probabilities(step); SEXP out = PROTECT(allocVector(VECSXP, 3)); SEXP means = PROTECT(allocVector(REALSXP, G*V)); SEXP meansquares = PROTECT(allocVector(REALSXP, G*V)); SEXP probs = PROTECT(allocVector(REALSXP, G*n_hyp)); for(int i=0; i<n_hyp*G; i++){ REAL(probs)[i] = chain.probs[i]; } for(int i=0; i<G*V; i++){ REAL(means)[i] = chain.means_betas[i]; REAL(meansquares)[i] = chain.meansquares_betas[i]; } SET_VECTOR_ELT(out, 0, probs); SET_VECTOR_ELT(out, 1, means); SET_VECTOR_ELT(out, 2, meansquares); UNPROTECT(4); return out; } /* extern "C" SEXP Rtest_write_samples(SEXP Rchain, SEXP Ridx, SEXP Rn_iter){ chain_t chain = Rchain_wrap(Rchain); int *idx = 
INTEGER(Ridx), n_iter = INTEGER(Rn_iter)[0], G_out = length(Ridx); samples_t samples = samples_t(n_iter, n_iter, G_out, chain.K, chain.V, idx); for(int i=0; i<n_iter; i++){ //need to pass data to get summaries samples.write_g_samples(chain); std::cout << "Completed step " << samples.step <<"\n"; } std::cout << "zeta:\n"; printVec(chain.zeta, chain.G, 1); SEXP out = Csamples_wrap(samples); UNPROTECT(6); return out; } */ extern "C" SEXP Rtest_draw_beta(SEXP Rchain, SEXP Rdata, SEXP Rpriors, SEXP Rn_iter, SEXP Ridx_save, SEXP Rseed){ data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); int n_iter = INTEGER(Rn_iter)[0], G_save = length(Ridx_save), seed = INTEGER(Rseed)[0]; samples_t samples(n_iter, 0, G_save, priors.K, data.V, INTEGER(Ridx_save), true); //instantiate RNGs hiprandState_t *devStates; CUDA_CALL(hipMalloc((void **) &devStates, data.G * data.V * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup_kernel), dim3(data.G), dim3(data.V), 0, 0, seed, data.G*data.V, devStates); summary2 summary = summary2(chain.K, chain.zeta, data); for(int i=0; i<n_iter; i++){ //Gibbs steps draw_beta(devStates, data, chain, priors, summary, 0); samples.write_g_samples(chain, summary); } CUDA_CALL(hipFree(devStates)); SEXP samples_out = Csamples_wrap(samples); UNPROTECT(7); return samples_out; } extern "C" SEXP Rtest_draw_tau2(SEXP Rchain, SEXP Rdata, SEXP Rpriors, SEXP Rn_iter, SEXP Ridx_save, SEXP Rseed){ data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); int n_iter = INTEGER(Rn_iter)[0], G_save = length(Ridx_save), seed = INTEGER(Rseed)[0]; samples_t samples(n_iter, 0, G_save, priors.K, data.V, INTEGER(Ridx_save), true); //instantiate RNGs hiprandState_t *devStates; CUDA_CALL(hipMalloc((void **) &devStates, data.G * data.V * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup_kernel), dim3(data.G), dim3(data.V), 0, 0, seed, data.G*data.V, devStates); summary2 
summary = summary2(chain.K, chain.zeta, data); for(int i=0; i<n_iter; i++){ //Gibbs steps draw_tau2(devStates, chain, priors, data, summary, 2); std::cout << "tau2:\n"; printVec(chain.tau2, priors.K, 1); samples.write_g_samples(chain, summary); std::cout << "step " << samples.step_g << ":\n"; printVec(samples.save_tau2, chain.G, n_iter); } CUDA_CALL(hipFree(devStates)); SEXP samples_out = Csamples_wrap(samples); UNPROTECT(7); return samples_out; } extern "C" SEXP Rtest_draw_alpha_SD(SEXP RN, SEXP Rchain, SEXP Rpriors, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0], N = INTEGER(RN)[0]; chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); std::cout << "pi:\n"; printVec(chain.pi, priors.K, 1); SEXP out = PROTECT(allocVector(REALSXP, N)); double *outp = REAL(out); for(int i=0; i<N; i++){ draw_alpha_SD_slice(chain, priors, verbose); outp[i] = chain.alpha; } //clean up UNPROTECT(1); return out; } extern "C" SEXP Rtest_weighted_sum(SEXP Rdata, SEXP Rpriors, SEXP Rchain, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0]; data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); summary2 smry(priors.K, chain.zeta, data); fvec_d wt_sum(priors.K * chain.V, 0.0); construct_weighted_sum(wt_sum, smry, priors, chain, verbose); fvec_h wt_sum_h(priors.K * chain.V); thrust::copy(wt_sum.begin(), wt_sum.end(), wt_sum_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, priors.K*chain.V)); for(int i=0; i<priors.K*chain.V; i++){ REAL(out)[i] = wt_sum_h[i]; } UNPROTECT(1); return out; }
a20206b62a806ed4add5dc23876914042899c5e5.cu
#include "header/cuda_usage.h" #include "header/chain.h" #include "header/iterator.h" #include "header/printing.h" #include "header/cluster_probability.h" #include "header/multinomial.h" #include "header/distribution.h" #include "header/beta_hat.h" #include "header/wrap_R.h" #include "header/summary2.h" #include "header/cholesky.h" #include "header/construct_prec.h" #include "header/multi_dot_product.h" #include "header/gibbs.h" #include "header/running_mean.h" #include <R.h> #include <Rinternals.h> #include <Rmath.h> // This prevents the replacement of "beta" by Rmath.h #ifdef beta #undef beta #endif #include <cuda.h> extern "C" SEXP Rdata_init(SEXP ytyR, SEXP xtyR, SEXP xtxR, SEXP G, SEXP V, SEXP N){ int g = INTEGER(G)[0], v = INTEGER(V)[0], n = INTEGER(N)[0]; fvec_h xtx(v*v, 1.0); double *ytyp = REAL(ytyR); double *xtyp = REAL(xtyR); double *xtxp = &(xtx[0]); data_t data(ytyp, xtyp, xtxp, g, v, n, false); printVec(data.xtx, v, v); printVec(data.xty, v, g); printVec(data.ytx, g, v); SEXP zero = PROTECT(allocVector(INTSXP, 1)); INTEGER(zero)[0] = 0; UNPROTECT(1); return zero; } extern "C" SEXP Rcluster_weights(SEXP Rdata, SEXP Rchain, SEXP Rpriors, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0]; data_t data = Rdata_wrap(Rdata, verbose); chain_t chain = Rchain_wrap(Rchain, verbose); priors_t priors = Rpriors_wrap(Rpriors, verbose); fvec_d grid(data.G*priors.K); cluster_weights_no_voom(grid, data, chain, 0); fvec_h grid_h(data.G*priors.K); thrust::copy(grid.begin(), grid.end(), grid_h.begin()); SEXP OUT = PROTECT(allocVector(REALSXP, data.G*priors.K)); for(int i=0; i<data.G*priors.K; ++i) REAL(OUT)[i] = grid_h[i]; UNPROTECT(1); return OUT; } extern "C" SEXP Rnormalize_wts(SEXP grid, SEXP dim1, SEXP dim2){ int k=INTEGER(dim1)[0], g = INTEGER(dim2)[0]; fvec_h grd_h(REAL(grid), REAL(grid) + k*g); fvec_d grd_d(k*g); thrust::copy(grd_h.begin(), grd_h.end(), grd_d.begin()); normalize_wts(grd_d, k, g); thrust::copy(grd_d.begin(), grd_d.end(), grd_h.begin()); SEXP out = 
PROTECT(allocVector(REALSXP, g*k)); for(int i=0; i<k*g; ++i) REAL(out)[i] = grd_h[i]; UNPROTECT(1); return out; } extern "C" SEXP RgetUniform(SEXP Rseed, SEXP upperR){ int n = length(upperR), seed = INTEGER(Rseed)[0]; //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, n * sizeof(curandState))); //temporary memory fvec_h upper(REAL(upperR), REAL(upperR) + n); fvec_d upper_d(upper.begin(), upper.end()); double *upper_d_ptr = thrust::raw_pointer_cast(upper_d.data()); int block_size=512; int n_blocks = n/block_size + 1; //set up RNGs setup_kernel<<<n_blocks,block_size>>>(seed, n, devStates); //sample from U(0, upper) getUniform<<<n_blocks,block_size>>>(devStates, n, upper_d_ptr); thrust::copy(upper_d.begin(), upper_d.end(), upper.begin()); SEXP out = PROTECT(allocVector(REALSXP, n)); for(int i = 0; i < n; ++i) REAL(out)[i] = upper_d[i]; //clean up CUDA_CALL(cudaFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rgnl_multinomial(SEXP Rseed, SEXP probs, SEXP K, SEXP G){ int k = INTEGER(K)[0], g = INTEGER(G)[0], seed = INTEGER(Rseed)[0]; //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, g * sizeof(curandState))); //temporary memory fvec_h probs_h(REAL(probs), REAL(probs) + g*k); fvec_d probs_d(probs_h.begin(), probs_h.end()); ivec_h zeta_h(g); ivec_d zeta_d(g); double *probs_d_ptr = thrust::raw_pointer_cast(probs_d.data()); //set up RNGs int block_size=512; int n_blocks = g/block_size + 1; setup_kernel<<<n_blocks,block_size>>>(seed, g, devStates); //get multinomial draws gnl_multinomial(zeta_d, probs_d, devStates, k, g); thrust::copy(probs_d.begin(), probs_d.end(), probs_h.begin()); thrust::copy(zeta_d.begin(), zeta_d.end(), zeta_h.begin()); SEXP out = PROTECT(allocVector(VECSXP, 2)); SEXP out_z = PROTECT(allocVector(INTSXP, g)); SEXP out_p = PROTECT(allocVector(REALSXP, k*g)); for(int i = 0; i < g; ++i){ INTEGER(out_z)[i] = zeta_h[i]; for(int j=0; j < k; ++j){ REAL(out_p)[i*k + j] = 
probs_h[i*k + j]; } } SET_VECTOR_ELT(out, 0, out_z); SET_VECTOR_ELT(out, 1, out_p); //clean up CUDA_CALL(cudaFree(devStates)); UNPROTECT(3); return out; } extern "C" SEXP Rbeta_hat(SEXP R_Lvec, SEXP R_xty, SEXP K, SEXP V){ int k=INTEGER(K)[0], v=INTEGER(V)[0]; fvec_h L_h(REAL(R_Lvec), REAL(R_Lvec)+k*v*v), b_h(REAL(R_xty), REAL(R_xty)+k*v); fvec_d L_d(k*v*v); fvec_d b_d(k*v); thrust::copy(L_h.begin(), L_h.end(), L_d.begin()); thrust::copy(b_h.begin(), b_h.end(), b_d.begin()); beta_hat(L_d, b_d, k, v); thrust::copy(b_d.begin(), b_d.end(), b_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, k*v)); for(int i=0; i<k*v; ++i) REAL(out)[i] = b_h[i]; UNPROTECT(1); return out; } extern "C" SEXP Rtest_data_wrap(SEXP Rdata, SEXP Rpriors, SEXP Rchain){ data_t data = Rdata_wrap(Rdata, 1); priors_t priors = Rpriors_wrap(Rpriors, 1); chain_t chain = Rchain_wrap(Rchain, 1); std::cout << "y transpose x\n"; printVec(data.ytx, data.G, data.V); std::cout << "prior location\n"; printVec(priors.mu0, priors.V, 1); std::cout << "Contrasts on location parameters"; printVec(chain.C, chain.P, chain.V); std::cout << "slice sampler width parameter: " << chain.slice_width << "\n"; SEXP out = PROTECT(allocVector(INTSXP, 1)); INTEGER(out)[0] = 0; UNPROTECT(1); return out; } //Temporarily disable while fixing draw_MVNormal /* extern"C" SEXP Rtest_MVNormal(SEXP Rseed, SEXP Rzeta, SEXP Rdata, SEXP Rpriors){ int seed = INTEGER(Rseed)[0]; data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); ivec_h zeta_h(INTEGER(Rzeta), INTEGER(Rzeta) + data.G); ivec_d zeta_d(zeta_h.begin(),zeta_h.end()); summary2 smry(priors.K, zeta_d, data); smry.print_Mk(); smry.print_yty(); smry.print_xty(); //instantiate RNGs curandState *devStates; int V = data.V, K = data.K; CUDA_CALL(cudaMalloc((void **) &devStates, V*K*sizeof(curandState))); int block_size=(512 / (32*V)) * (32*V); int n_blocks = (K*V)/block_size + 1; setup_kernel<<<n_blocks, block_size>>>(seed, priors.K*data.V, devStates); //make 
precision matrices fvec_d prec(smry.num_occupied * smry.V * smry.V, 0.0); fvec_d tau2(priors.K, 1.0); void construct_prec(fvec_d &prec, data_t &data, priors_t &priors, chain_t &chain, ivec_d &Mk); construct_prec(prec, data, priors, ) construct_prec(prec.begin(), prec.end(), priors.lambda2.begin(), priors.lambda2.end(), tau2.begin(), tau2.end(), smry.Mk.begin(), smry.Mk.end(), data.xtx.begin(), data.xtx.end(), smry.num_occupied, data.V); std::cout << "prec_matrices:\n"; printVec(prec, smry.V*smry.V, smry.num_occupied); //cholesky decomposition realIter b=prec.begin(), e = prec.end(); chol_multiple(b, e, data.V, smry.num_occupied); std::cout << "chol_matrices:\n"; printVec(prec, smry.V*smry.V, smry.num_occupied); std::cout << "smry.V= " << smry.V << "\n"; std::cout << "smry.num_occupied= " << smry.num_occupied << "\n"; //conditional means std::cout << "xty_sums:\n"; thrust::device_ptr<double> xty_ptr = &smry.xty_sums[0]; thrust::copy(xty_ptr, xty_ptr + smry.num_occupied * data.V, std::ostream_iterator<double>(std::cout, " ")); fvec_d bhat(smry.num_occupied * data.V); thrust::copy(xty_ptr, xty_ptr + smry.num_occupied * data.V, bhat.begin()); cudaDeviceSynchronize(); std::cout << "container for beta_hat (initialized):\n"; printVec(bhat, data.V, smry.num_occupied); beta_hat(prec, bhat, smry.num_occupied, data.V); std::cout << "beta_hat:\n"; printVec(bhat, data.V, smry.num_occupied); //draw beta int beta_size = data.V*priors.K; fvec_h beta_h(beta_size, 0.0); fvec_d beta(beta_h.begin(), beta_h.end()); draw_MVNormal(devStates, bhat, prec, beta, priors, smry, 0); //print value std::cout << "beta_draws:\n"; printVec(beta, data.V, priors.K); thrust::copy(beta.begin(), beta.end(), beta_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, beta_size)); for(int i=0; i < beta_size; ++i){ REAL(out)[i] = beta_h[i]; } //clean up CUDA_CALL(cudaFree(devStates)); UNPROTECT(1); return out; }*/ extern "C" SEXP Rmulti_dot_prod(SEXP Rx, SEXP Ry, SEXP Rdim, SEXP Rn){ int dim = 
INTEGER(Rdim)[0], n = INTEGER(Rn)[0]; fvec_h x_h(REAL(Rx), REAL(Rx) + dim*n); fvec_h y_h(REAL(Ry), REAL(Ry) + dim*n); fvec_d x_d(x_h.begin(), x_h.end()); fvec_d y_d(y_h.begin(), y_h.end()); fvec_d z_d(n); multi_dot_prod(x_d, y_d, z_d, dim, n); fvec_h z_h(n); thrust::copy(z_d.begin(), z_d.end(), z_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, n)); for(int i = 0; i < n; ++i){ REAL(out)[i] = z_h[i]; } UNPROTECT(1); return out; } extern "C" SEXP RsumSqErr(SEXP Rdata, SEXP Rzeta, SEXP K, SEXP Rbeta, SEXP verbose){ int k = INTEGER(K)[0], verb = INTEGER(verbose)[0]; data_t data = Rdata_wrap(Rdata); ivec_h zeta_h(INTEGER(Rzeta), INTEGER(Rzeta) + data.G); ivec_d zeta_d(zeta_h.begin(), zeta_h.end()); // get cluster summaries summary2 smry(k, zeta_d, data); fvec_d beta(REAL(Rbeta), REAL(Rbeta) + k*data.V); fvec_d sse_d(smry.num_occupied); if(verb>0){ // debugging std::cout << "beta:\n"; printVec(beta, data.V, k); std::cout << "xty sums:\n"; printVec(smry.xty_sums, smry.V, smry.num_occupied); std::cout << "yty sums:\n"; printVec(smry.yty_sums, smry.num_occupied, verb); std::cout << "xtx sums:\n"; printVec(smry.xtx_sums, smry.V*smry.V, smry.num_occupied); } // calculate SSE for given value of beta smry.sumSqErr(sse_d, beta, verb); //transfer to host vector fvec_h sse_h(smry.num_occupied); thrust::device_ptr<double> sse_ptr = &sse_d[0]; thrust::copy(sse_ptr, sse_ptr + smry.num_occupied, sse_h.begin()); //copy to SEXP SEXP out = PROTECT(allocVector(REALSXP, smry.num_occupied)); double *Rout = REAL(out); for(int i=0; i<smry.num_occupied; ++i){ Rout[i] = sse_h[i]; } UNPROTECT(1); return out; } //extern "C" SEXP Rtest_draw_tau2(SEXP Rseed, SEXP Rdata, SEXP Rchain, SEXP Rpriors){ // int seed = INTEGER(Rseed)[0]; // data_t data = Rdata_wrap(Rdata); // chain_t chain = Rchain_wrap(Rchain); // priors_t priors = Rpriors_wrap(Rpriors); // summary2 smry(chain.K, chain.zeta, data); // // //instantiate RNGs // curandState *devStates; // CUDA_CALL(cudaMalloc((void **) &devStates, 
priors.K * sizeof(curandState))); // setup_kernel<<<priors.K, 1>>>(seed, priors.K, devStates); // // std::cout << "tau2 before:\n"; // printVec(chain.tau2, chain.K, 1); // // draw_tau2(devStates, chain, priors, data, smry); // // std::cout << "tau2 after:\n"; // printVec(chain.tau2, chain.K, 1); // // fvec_h tau2(chain.K); // thrust::copy(chain.tau2.begin(), chain.tau2.end(), tau2.begin()); // // SEXP out = PROTECT(allocVector(REALSXP, chain.K)); // for(int i=0; i<chain.K; ++i){ // REAL(out)[i] = tau2[i]; // } // // //clean up // CUDA_CALL(cudaFree(devStates)); // UNPROTECT(1); // return out; //} extern "C" SEXP Rtest_draw_pi(SEXP Rseed, SEXP Rchain, SEXP Rpriors, SEXP Rdata, SEXP RmethodPi){ int seed = INTEGER(Rseed)[0]; data_t data = Rdata_wrap(Rdata); chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); summary2 smry(chain.K, chain.zeta, data); std::cout << "Mk:\n"; printVec(smry.Mk, chain.K, 1); int methodPi = INTEGER(RmethodPi)[0]; //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, priors.K * sizeof(curandState))); setup_kernel<<<priors.K, 1>>>(seed, priors.K, devStates); if(methodPi == 0){ draw_pi(devStates, chain, priors, smry, 2); } else if(methodPi == 1) { draw_pi_SD(devStates, chain, priors, smry, 2); } SEXP out = PROTECT(allocVector(REALSXP, chain.K)); for(int i=0; i<chain.K; ++i){ REAL(out)[i] = chain.pi[i]; } //clean up CUDA_CALL(cudaFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rtest_draw_zeta(SEXP Rseed, SEXP Rchain, SEXP Rpriors, SEXP Rdata){ int seed = INTEGER(Rseed)[0]; chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); data_t data = Rdata_wrap(Rdata); //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, data.G * sizeof(curandState))); setup_kernel<<<data.G, 1>>>(seed, data.G, devStates); draw_zeta(devStates, data, chain, priors, 0); //printVec(chain.zeta, data.G, 1); ivec_h zeta_h(data.G); 
thrust::copy(chain.zeta.begin(), chain.zeta.end(), zeta_h.begin()); SEXP out = PROTECT(allocVector(INTSXP, data.G)); for(int i=0; i<data.G; ++i){ INTEGER(out)[i] = zeta_h[i]; } //clean up CUDA_CALL(cudaFree(devStates)); UNPROTECT(1); return out; } extern "C" SEXP Rtest_running_mean(SEXP Rmean, SEXP Rnew, SEXP Rpow, SEXP Rstep){ int pow = INTEGER(Rpow)[0]; int step = INTEGER(Rstep)[0]; int len = length(Rmean); fvec_h mean_h(REAL(Rmean), REAL(Rmean) + len); fvec_h new_h(REAL(Rnew), REAL(Rnew) + len); fvec_d mean_d(mean_h.begin(), mean_h.end()); fvec_d new_d(new_h.begin(), new_h.end()); update_running_means(mean_d, new_d, len, step, pow); thrust::copy(mean_d.begin(), mean_d.end(), mean_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, len)); for(int i=0; i<len; i++) REAL(out)[i] = mean_h[i]; UNPROTECT(1); return out; } extern "C" SEXP Rtest_update_means(SEXP Rchain, SEXP Rstep){ int step = INTEGER(Rstep)[0]; chain_t chain = Rchain_wrap(Rchain); int G = chain.G, V = chain.V, n_hyp = chain.n_hyp; chain.update_means(step); chain.update_probabilities(step); SEXP out = PROTECT(allocVector(VECSXP, 3)); SEXP means = PROTECT(allocVector(REALSXP, G*V)); SEXP meansquares = PROTECT(allocVector(REALSXP, G*V)); SEXP probs = PROTECT(allocVector(REALSXP, G*n_hyp)); for(int i=0; i<n_hyp*G; i++){ REAL(probs)[i] = chain.probs[i]; } for(int i=0; i<G*V; i++){ REAL(means)[i] = chain.means_betas[i]; REAL(meansquares)[i] = chain.meansquares_betas[i]; } SET_VECTOR_ELT(out, 0, probs); SET_VECTOR_ELT(out, 1, means); SET_VECTOR_ELT(out, 2, meansquares); UNPROTECT(4); return out; } /* extern "C" SEXP Rtest_write_samples(SEXP Rchain, SEXP Ridx, SEXP Rn_iter){ chain_t chain = Rchain_wrap(Rchain); int *idx = INTEGER(Ridx), n_iter = INTEGER(Rn_iter)[0], G_out = length(Ridx); samples_t samples = samples_t(n_iter, n_iter, G_out, chain.K, chain.V, idx); for(int i=0; i<n_iter; i++){ //need to pass data to get summaries samples.write_g_samples(chain); std::cout << "Completed step " << samples.step 
<<"\n"; } std::cout << "zeta:\n"; printVec(chain.zeta, chain.G, 1); SEXP out = Csamples_wrap(samples); UNPROTECT(6); return out; } */ extern "C" SEXP Rtest_draw_beta(SEXP Rchain, SEXP Rdata, SEXP Rpriors, SEXP Rn_iter, SEXP Ridx_save, SEXP Rseed){ data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); int n_iter = INTEGER(Rn_iter)[0], G_save = length(Ridx_save), seed = INTEGER(Rseed)[0]; samples_t samples(n_iter, 0, G_save, priors.K, data.V, INTEGER(Ridx_save), true); //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, data.G * data.V * sizeof(curandState))); setup_kernel<<<data.G, data.V>>>(seed, data.G*data.V, devStates); summary2 summary = summary2(chain.K, chain.zeta, data); for(int i=0; i<n_iter; i++){ //Gibbs steps draw_beta(devStates, data, chain, priors, summary, 0); samples.write_g_samples(chain, summary); } CUDA_CALL(cudaFree(devStates)); SEXP samples_out = Csamples_wrap(samples); UNPROTECT(7); return samples_out; } extern "C" SEXP Rtest_draw_tau2(SEXP Rchain, SEXP Rdata, SEXP Rpriors, SEXP Rn_iter, SEXP Ridx_save, SEXP Rseed){ data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); int n_iter = INTEGER(Rn_iter)[0], G_save = length(Ridx_save), seed = INTEGER(Rseed)[0]; samples_t samples(n_iter, 0, G_save, priors.K, data.V, INTEGER(Ridx_save), true); //instantiate RNGs curandState *devStates; CUDA_CALL(cudaMalloc((void **) &devStates, data.G * data.V * sizeof(curandState))); setup_kernel<<<data.G, data.V>>>(seed, data.G*data.V, devStates); summary2 summary = summary2(chain.K, chain.zeta, data); for(int i=0; i<n_iter; i++){ //Gibbs steps draw_tau2(devStates, chain, priors, data, summary, 2); std::cout << "tau2:\n"; printVec(chain.tau2, priors.K, 1); samples.write_g_samples(chain, summary); std::cout << "step " << samples.step_g << ":\n"; printVec(samples.save_tau2, chain.G, n_iter); } 
CUDA_CALL(cudaFree(devStates)); SEXP samples_out = Csamples_wrap(samples); UNPROTECT(7); return samples_out; } extern "C" SEXP Rtest_draw_alpha_SD(SEXP RN, SEXP Rchain, SEXP Rpriors, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0], N = INTEGER(RN)[0]; chain_t chain = Rchain_wrap(Rchain); priors_t priors = Rpriors_wrap(Rpriors); std::cout << "pi:\n"; printVec(chain.pi, priors.K, 1); SEXP out = PROTECT(allocVector(REALSXP, N)); double *outp = REAL(out); for(int i=0; i<N; i++){ draw_alpha_SD_slice(chain, priors, verbose); outp[i] = chain.alpha; } //clean up UNPROTECT(1); return out; } extern "C" SEXP Rtest_weighted_sum(SEXP Rdata, SEXP Rpriors, SEXP Rchain, SEXP Rverbose){ int verbose = INTEGER(Rverbose)[0]; data_t data = Rdata_wrap(Rdata); priors_t priors = Rpriors_wrap(Rpriors); chain_t chain = Rchain_wrap(Rchain); summary2 smry(priors.K, chain.zeta, data); fvec_d wt_sum(priors.K * chain.V, 0.0); construct_weighted_sum(wt_sum, smry, priors, chain, verbose); fvec_h wt_sum_h(priors.K * chain.V); thrust::copy(wt_sum.begin(), wt_sum.end(), wt_sum_h.begin()); SEXP out = PROTECT(allocVector(REALSXP, priors.K*chain.V)); for(int i=0; i<priors.K*chain.V; i++){ REAL(out)[i] = wt_sum_h[i]; } UNPROTECT(1); return out; }
4224155bd420bf75ffd16aef652bf1421a2558fa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #define NVCC #include "printing.cuh" #include "cuda_err_check.h" #include "read_volume.h" #include "write_volume.h" #include "wavelet_slow.h" #include "wavelet_slow.cuh" #include "compare.h" #include "diff.h" #include "norms.h" #include "init_x.h" #include "init_random.h" const int FORWARD = 0; const int INVERSE = 1; int err_check(float *x_gpu, float *d_x, float *x, const int nx, const int ny, const int nz, const int bx, const int by, const int bz, const bool verbose=false, const double l2_tol=1e-5, const double l1_tol=1e-5, const double linf_tol=1e-5) { size_t b = bx * by * bz; size_t n = nx * ny * nz; size_t num_bytes = b * n * sizeof(float); hipMemcpy(x_gpu, d_x, num_bytes, hipMemcpyDeviceToHost); const char *errtype[] = {"abs.", "rel."}; for (int a = 0; a < 2; ++a) { double l2err = l2norm(x, x_gpu, b * n, a); double l1err = l1norm(x, x_gpu, b * n, a); double linferr = linfnorm(x, x_gpu, b * n, a); if (verbose) printf("%s l2 error = %g l1 error = %g linf error = %g \n", errtype[a], l2err, l1err, linferr); if (a == 1 && (l2err > l2_tol || l1err > l1_tol || linferr > linf_tol) ) return 1; } return 0; } void print_status(int err) { if (!err) printf("OK\n"); else printf("FAILED\n"); } int test_kernel(enum kernel k, const int nx, const int ny, const int nz, const int bx, const int by, const int bz, const int verbose) { float *x; init_random(x, nx, ny, nz, bx, by, bz); size_t num_bytes = sizeof(float) * nx * ny * nz * bx * by * bz; float *x_gpu = (float*)malloc(num_bytes); float *d_x; hipMalloc((void**)&d_x, num_bytes); hipMemcpy(d_x, x, num_bytes, hipMemcpyHostToDevice); printf("%s \t [%d, %d, %d] [%d, %d, %d] \n", get_kernel_name(k), nx, ny, nz, bx, by, bz); wl79_h<FORWARD>(k, d_x, bx, by, bz); wl79_h<INVERSE>(k, d_x, bx, by, bz); hipDeviceSynchronize(); int err = err_check(x_gpu, d_x, x, nx, ny, nz, bx, by, bz, verbose); print_status(err); free(x); 
free(x_gpu); hipFree(d_x); return err; } int main(int argc, char **argv) { const int verbose = 0; int bx = 11; int by = 9; int bz = 8; test_kernel(WL79_8x8x8, bz, bz, bz, bx, by, bz, verbose); test_kernel(WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT1WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT2WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT3WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT4WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT5WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT6WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT7WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); }
4224155bd420bf75ffd16aef652bf1421a2558fa.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #define NVCC #include "printing.cuh" #include "cuda_err_check.h" #include "read_volume.h" #include "write_volume.h" #include "wavelet_slow.h" #include "wavelet_slow.cuh" #include "compare.h" #include "diff.h" #include "norms.h" #include "init_x.h" #include "init_random.h" const int FORWARD = 0; const int INVERSE = 1; int err_check(float *x_gpu, float *d_x, float *x, const int nx, const int ny, const int nz, const int bx, const int by, const int bz, const bool verbose=false, const double l2_tol=1e-5, const double l1_tol=1e-5, const double linf_tol=1e-5) { size_t b = bx * by * bz; size_t n = nx * ny * nz; size_t num_bytes = b * n * sizeof(float); cudaMemcpy(x_gpu, d_x, num_bytes, cudaMemcpyDeviceToHost); const char *errtype[] = {"abs.", "rel."}; for (int a = 0; a < 2; ++a) { double l2err = l2norm(x, x_gpu, b * n, a); double l1err = l1norm(x, x_gpu, b * n, a); double linferr = linfnorm(x, x_gpu, b * n, a); if (verbose) printf("%s l2 error = %g l1 error = %g linf error = %g \n", errtype[a], l2err, l1err, linferr); if (a == 1 && (l2err > l2_tol || l1err > l1_tol || linferr > linf_tol) ) return 1; } return 0; } void print_status(int err) { if (!err) printf("OK\n"); else printf("FAILED\n"); } int test_kernel(enum kernel k, const int nx, const int ny, const int nz, const int bx, const int by, const int bz, const int verbose) { float *x; init_random(x, nx, ny, nz, bx, by, bz); size_t num_bytes = sizeof(float) * nx * ny * nz * bx * by * bz; float *x_gpu = (float*)malloc(num_bytes); float *d_x; cudaMalloc((void**)&d_x, num_bytes); cudaMemcpy(d_x, x, num_bytes, cudaMemcpyHostToDevice); printf("%s \t [%d, %d, %d] [%d, %d, %d] \n", get_kernel_name(k), nx, ny, nz, bx, by, bz); wl79_h<FORWARD>(k, d_x, bx, by, bz); wl79_h<INVERSE>(k, d_x, bx, by, bz); cudaDeviceSynchronize(); int err = err_check(x_gpu, d_x, x, nx, ny, nz, bx, by, bz, verbose); print_status(err); free(x); free(x_gpu); cudaFree(d_x); return err; } int 
main(int argc, char **argv) { const int verbose = 0; int bx = 11; int by = 9; int bz = 8; test_kernel(WL79_8x8x8, bz, bz, bz, bx, by, bz, verbose); test_kernel(WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT1WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT2WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT3WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT4WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT5WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT6WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); test_kernel(OPT7WL79_32x32x32, 32, 32, 32, bx, by, bz, verbose); }
28e08a82eb0a69434cec53e4f9ba1c154c3a3ed3.hip
// !!! This is a file automatically generated by hipify!!! #include "AdjointSolver.h" #include "CommonKernels.h" #include <cuMat/Core> #include <cinder/app/AppBase.h> #include "tinyformat.h" #include "GradientDescent.h" #include "RpropGradientDescent.h" #include "LBFGS.h" #include "Utils3D.h" #include "CoordinateTransformation.h" #include "CudaTimer.h" #include "DebugUtils.h" //For testing: set to 1 to enforce a symmetric matrix in the CG //If 0, small unsymmetries of a few ulps are in the matrix due to the ordering of the operations //If 1, the upper and lower triangular parts are averaged to create a numerically exact symmetric matrix #define MAKE_NEWMARK_SYMMETRIC 0 //Specifies if the forward iteration shall be stopped if the linear solver did not converge // 1: no convergence // 2: NaN #define FORWARD_BREAK_ON_DIVERGENCE 2 //Specifies if the adjoint step shall ignore the current step (no gradients added) if the linear solver did not converge #define ADJOINT_IGNORE_DIVERGENCE 0 //Some way more verbose logging #define ADJOINT_VERBOSE_LOGGING 0 namespace ar3d { AdjointSolver::PrecomputedValues AdjointSolver::allocatePrecomputedValues(const Input& input) { PrecomputedValues p; p.lumpedMass_ = VectorX(input.numActiveNodes_); p.lumpedMass_.setZero(); p.bodyForces_ = Vector3X(input.numActiveNodes_); p.bodyForces_.setZero(); p.initialVelocity_ = Vector3X(input.numActiveNodes_); p.initialVelocity_.setZero(); return p; } AdjointSolver::ForwardState AdjointSolver::allocateForwardState(const Input& input) { ForwardState s; s.displacements_ = Vector3X(input.numActiveNodes_); s.displacements_.setZero(); s.velocities_ = Vector3X(input.numActiveNodes_); s.velocities_.setZero(); return s; } AdjointSolver::ForwardStorage AdjointSolver::allocateForwardStorage(const Input& input) { ForwardStorage s; s.forces_ = Vector3X(input.numActiveNodes_); s.stiffness_ = SMatrix3x3(input.sparsityPattern_); s.newmarkA_ = SMatrix3x3(input.sparsityPattern_); s.newmarkB_ = 
Vector3X(input.numActiveNodes_); return s; } void AdjointSolver::BackwardState::reset() { adjDisplacements_.setZero(); adjVelocities_.setZero(); if (adjGridDisplacements_.size() > 0) adjGridDisplacements_.setZero(); } AdjointSolver::BackwardState AdjointSolver::allocateBackwardState(const Input& input, int costFunctionInput) { BackwardState s; s.adjDisplacements_ = Vector3X(input.numActiveNodes_); s.adjVelocities_ = Vector3X(input.numActiveNodes_); if (costFunctionInput & int(ICostFunction::RequiredInput::GridDisplacements)) s.adjGridDisplacements_ = WorldGridData<real3>::DeviceArray_t(input.grid_->getSize().x(), input.grid_->getSize().y(), input.grid_->getSize().z()); s.reset(); return s; } AdjointSolver::BackwardStorage AdjointSolver::allocateBackwardStorage(const Input& input) { BackwardStorage s; s.unaryLumpedMass_ = VectorX(input.numActiveNodes_); s.unaryBodyForces_ = Vector3X(input.numActiveNodes_); s.adjNewmarkA_ = SMatrix3x3(input.sparsityPattern_); s.adjNewmarkB_ = Vector3X(input.numActiveNodes_); s.adjStiffness_ = SMatrix3x3(input.sparsityPattern_); s.adjForces_ = Vector3X(input.numActiveNodes_); s.adjMass_ = VectorX(input.numActiveNodes_); s.adjForces_.setZero(); s.adjMass_.setZero(); return s; } AdjointSolver::AdjointVariables& AdjointSolver::AdjointVariables::operator*=(double scaling) { adjGravity_ *= scaling; adjYoungsModulus_ *= scaling; adjPoissonRatio_ *= scaling; adjMass_ *= scaling; adjMassDamping_ *= scaling; adjStiffnessDamping_ *= scaling; adjGroundPlane_ *= scaling; adjInitialAngularVelocity *= scaling; adjInitialLinearVelocity *= scaling; return *this; } AdjointSolver::InputVariables::InputVariables() : optimizeGravity_(false) , currentGravity_(make_real3(0, -10, 0)) , optimizeYoungsModulus_(false) , currentYoungsModulus_(2000) , optimizePoissonRatio_(false) , currentPoissonRatio_(0.45) , optimizeMass_(false) , currentMass_(1) , optimizeMassDamping_(false) , currentMassDamping_(0.1) , optimizeStiffnessDamping_(false) , 
currentStiffnessDamping_(0.01) , optimizeInitialLinearVelocity_(false) , currentInitialLinearVelocity_(make_real3(0,0,0)) , optimizeInitialAngularVelocity_(false) , currentInitialAngularVelocity_(make_real3(0, 0, 0)) , optimizeGroundPlane_(false) , currentGroundPlane_(make_real4(0, 1, 0, 0)) { } AdjointSolver::CostFunctionTmp AdjointSolver::allocateCostFunctionTmp(const Input & input, CostFunctionPtr costFunction) { CostFunctionTmp tmp; if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { tmp.costOutput_.adjDisplacements_ = Vector3X(input.numActiveNodes_); tmp.costOutput_.adjVelocities_ = Vector3X(input.numActiveNodes_); } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { tmp.costOutput_.adjGridDisplacements_ = WorldGridData<real3>::DeviceArray_t(input.grid_->getSize().x(), input.grid_->getSize().y(), input.grid_->getSize().z()); } return tmp; } bool AdjointSolver::performForwardStep( const ForwardState& prevState, ForwardState& nextStateOut, ForwardStorage& nextStorageOut, const Input& input, const PrecomputedValues& precomputed, const SoftBodySimulation3D::Settings& settings, int costFunctionRequiredInput, bool memorySaving) { //reset storage nextStorageOut.forces_.setZero(); nextStorageOut.stiffness_.setZero(); nextStorageOut.newmarkA_.setZero(); nextStorageOut.newmarkB_.setZero(); SoftBodyGrid3D::State s; s.displacements_ = prevState.displacements_; s.velocities_ = prevState.velocities_; //1. collision forces nextStorageOut.forces_.inplace() = precomputed.bodyForces_; if (settings.enableCollision_) { SoftBodyGrid3D::applyCollisionForces(input, settings, s, nextStorageOut.forces_); } //2. stiffness matrix SoftBodyGrid3D::computeStiffnessMatrix(input, s, settings, nextStorageOut.stiffness_, nextStorageOut.forces_); //3. 
Solve CI_LOG_D("Norm of PrevDisplacement: " << static_cast<real>(prevState.displacements_.norm())); CommonKernels::newmarkTimeIntegration( nextStorageOut.stiffness_, nextStorageOut.forces_, precomputed.lumpedMass_, prevState.displacements_, prevState.velocities_, settings.dampingAlpha_, settings.dampingBeta_, settings.timestep_, nextStorageOut.newmarkA_, nextStorageOut.newmarkB_, settings.newmarkTheta_); nextStateOut.displacements_.inplace() = prevState.displacements_ + make_real3(settings.timestep_) * prevState.velocities_; int iterations = settings.solverIterations_; real tolError = settings.solverTolerance_; #if MAKE_NEWMARK_SYMMETRIC == 1 nextStorageOut.newmarkA_ = DebugUtils::makeSymmetric(nextStorageOut.newmarkA_); #endif CommonKernels::solveCG( nextStorageOut.newmarkA_, nextStorageOut.newmarkB_, nextStateOut.displacements_, iterations, tolError); CI_LOG_D("Norm of NextDisplacement: " << static_cast<real>(nextStateOut.displacements_.norm())); CommonKernels::newmarkComputeVelocity( prevState.displacements_, prevState.velocities_, nextStateOut.displacements_, nextStateOut.velocities_, settings.timestep_, settings.newmarkTheta_); #if FORWARD_BREAK_ON_DIVERGENCE==2 bool failedToConverge = std::isnan(tolError); #else bool failedToConverge = iterations == settings.solverIterations_; #endif //4. 
Post-Processing if needed if (costFunctionRequiredInput & int(ICostFunction::RequiredInput::GridDisplacements)) { //diffuse displacements over the whole grid const Eigen::Vector3i& size = input.grid_->getSize(); nextStateOut.gridDisplacements_ = WorldGridData<real3>::DeviceArray_t::Constant(size.x(), size.y(), size.z(), make_real3(0)); SoftBodyGrid3D::State diffusionState; diffusionState.displacements_ = nextStateOut.displacements_; SoftBodyGrid3D::DiffusionRhs diffusionTmp1 = SoftBodyGrid3D::DiffusionRhs(input.numDiffusedNodes_, 1, 3); SoftBodyGrid3D::DiffusionRhs diffusionTmp2 = SoftBodyGrid3D::DiffusionRhs(input.numDiffusedNodes_, 1, 3); SoftBodyGrid3D::diffuseDisplacements( input, diffusionState, nextStateOut.gridDisplacements_, diffusionTmp1, diffusionTmp2); } return !failedToConverge; } real AdjointSolver::evaluateCostFunction( CostFunctionPtr costFunction, int timestep, CostFunctionTmp& tmp, const ForwardState& forwardState, BackwardState& backwardStateOut, const Input& input) { if (!costFunction->hasTimestep(timestep)) return 0; tmp.costOutput_.cost_ = 0; //prepare input and output if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { tmp.costInput_.displacements_ = forwardState.displacements_; tmp.costInput_.velocities_ = forwardState.velocities_; tmp.costOutput_.adjDisplacements_.setZero(); tmp.costOutput_.adjVelocities_.setZero(); } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { tmp.costInput_.gridDisplacements_ = forwardState.gridDisplacements_; tmp.costInput_.referenceSDF_ = input.referenceSdf_; tmp.costOutput_.adjGridDisplacements_.setZero(); } //evaluate cost function costFunction->evaluate(timestep, tmp.costInput_, tmp.costOutput_); //apply output if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { backwardStateOut.adjDisplacements_ += tmp.costOutput_.adjDisplacements_; backwardStateOut.adjVelocities_ += 
tmp.costOutput_.adjVelocities_; } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { backwardStateOut.adjGridDisplacements_ += tmp.costOutput_.adjGridDisplacements_; } return tmp.costOutput_.cost_; } void AdjointSolver::performBackwardStep( const ForwardState& prevState, const ForwardState& nextState, const BackwardState& adjNextState, BackwardState& adjPrevStateOut, AdjointVariables& adjVariablesOut, const Input& input, const PrecomputedValues& precomputed, ForwardStorage& nextStorage, BackwardStorage& adjStorage, const SoftBodySimulation3D::Settings& settings, int costFunctionRequiredInput, bool memorySaving) { if (memorySaving) { //in memory saving mode, the intermediate variables (in nextStorage) are not saved //and thus we recompute them here. //This is a copy of performForwardStep without the linear solvers nextStorage.forces_.setZero(); nextStorage.stiffness_.setZero(); nextStorage.newmarkA_.setZero(); nextStorage.newmarkB_.setZero(); SoftBodyGrid3D::State s; s.displacements_ = prevState.displacements_; s.velocities_ = prevState.velocities_; //1. collision forces nextStorage.forces_.inplace() = precomputed.bodyForces_; if (settings.enableCollision_) { SoftBodyGrid3D::applyCollisionForces(input, settings, s, nextStorage.forces_); } //2. stiffness matrix SoftBodyGrid3D::computeStiffnessMatrix(input, s, settings, nextStorage.stiffness_, nextStorage.forces_); //3. Newmark time integration / Solve is ommitted CommonKernels::newmarkTimeIntegration( nextStorage.stiffness_, nextStorage.forces_, precomputed.lumpedMass_, prevState.displacements_, prevState.velocities_, settings.dampingAlpha_, settings.dampingBeta_, settings.timestep_, nextStorage.newmarkA_, nextStorage.newmarkB_, settings.newmarkTheta_); #if MAKE_NEWMARK_SYMMETRIC==1 nextStorage.newmarkA_ = DebugUtils::makeSymmetric(nextStorage.newmarkA_); #endif } Vector3X adjNextDisplacement = adjNextState.adjDisplacements_.deepClone(); //adj4. 
    // Postprocessing (adjoint of forward step 4)
    if (costFunctionRequiredInput & int(ICostFunction::RequiredInput::GridDisplacements)) {
        // adjoint of displacement diffusion over the whole grid
        adjointDiffuseDisplacements(input, adjNextState.adjGridDisplacements_, adjNextDisplacement);
    }
#if ADJOINT_VERBOSE_LOGGING==1
    // Debug output: dump the adjoint displacements to the console.
    std::vector<real3> adjNextDisplacementHost(adjNextDisplacement.size());
    adjNextDisplacement.copyToHost(&adjNextDisplacementHost[0]);
    cinder::app::console() << "adjNextDisplacement:\n";
    for (int i = 0; i < adjNextDisplacementHost.size(); ++i) {
        real3 v = adjNextDisplacementHost[i];
        tinyformat::format(cinder::app::console(), " [%3d](%7.5f, %7.5f, %7.5f)\n", i, v.x, v.y, v.z);
    }
#endif
    // adj3. Solve
    CI_LOG_D("Norm of adjNextDisplacement: " << static_cast<real>(adjNextDisplacement.norm()));
    CommonKernels::adjointNewmarkComputeVelocity(
        adjNextState.adjVelocities_, adjNextDisplacement,
        adjPrevStateOut.adjVelocities_, adjPrevStateOut.adjDisplacements_,
        settings.timestep_, settings.newmarkTheta_);
    adjStorage.adjNewmarkA_.setZero();
    adjStorage.adjNewmarkB_.setZero();
    CI_LOG_D("Norm of adjNextDisplacement: " << static_cast<real>(adjNextDisplacement.norm()));
    bool converged = CommonKernels::adjointSolveCG(
        nextStorage.newmarkA_, nextStorage.newmarkB_, nextState.displacements_,
        adjNextDisplacement, adjStorage.adjNewmarkA_, adjStorage.adjNewmarkB_,
        settings.solverIterations_*2, settings.solverTolerance_); //adjoint solve needs longer (no good initial guess)
    CI_LOG_D("Norm of adjNewmarkA: " << static_cast<real>(adjStorage.adjNewmarkA_.norm()));
    CI_LOG_D("Norm of adjNewmarkB: " << static_cast<real>(adjStorage.adjNewmarkB_.norm()));
#if ADJOINT_IGNORE_DIVERGENCE==1
    if (!converged) {
        CI_LOG_E("adjoint CG not converged, force gradients to zero");
        // This may be a bit too harsh, since all gradients are lost,
        // but I don't have a better idea
        adjStorage.adjNewmarkA_.setZero();
        adjStorage.adjNewmarkB_.setZero();
    }
#endif
    // Adjoint of the Newmark time integration: produces adjoints of the
    // stiffness matrix, forces, lumped mass and the damping coefficients.
    adjStorage.adjStiffness_.setZero();
    adjStorage.adjMass_.setZero();
    adjStorage.adjForces_.setZero();
    DeviceScalar adjMassDamping = DeviceScalar::Zero();
    DeviceScalar adjStiffnessDamping = DeviceScalar::Zero();
    CommonKernels::adjointNewmarkTimeIntegration(
        nextStorage.stiffness_, nextStorage.forces_, precomputed.lumpedMass_,
        prevState.displacements_, prevState.velocities_,
        settings.dampingAlpha_, settings.dampingBeta_,
        adjStorage.adjNewmarkA_, adjStorage.adjNewmarkB_,
        adjStorage.adjStiffness_, adjStorage.adjForces_, adjStorage.adjMass_,
        adjPrevStateOut.adjDisplacements_, adjPrevStateOut.adjVelocities_,
        adjMassDamping, adjStiffnessDamping,
        settings.timestep_, settings.newmarkTheta_);
    adjVariablesOut.adjMassDamping_ += static_cast<real>(adjMassDamping);
    adjVariablesOut.adjStiffnessDamping_ += static_cast<real>(adjStiffnessDamping);
    CI_LOG_D("Norm of adjPrevDisplacement: " << static_cast<real>(adjPrevStateOut.adjDisplacements_.norm()));
    CI_LOG_D("Norm of adjPrevVelocities: " << static_cast<real>(adjPrevStateOut.adjVelocities_.norm()));
    // adj2. Stiffness matrix: back-propagates to the displacements and the
    // Lame coefficients.
    adjStorage.adjLambda_.setZero();
    adjStorage.adjMu_.setZero();
    adjointComputeStiffnessMatrix(
        input, prevState.displacements_, settings,
        adjStorage.adjStiffness_, adjStorage.adjForces_,
        adjPrevStateOut.adjDisplacements_,
        adjStorage.adjLambda_, adjStorage.adjMu_);
    real adjLambda = static_cast<real>(adjStorage.adjLambda_);
    real adjMu = static_cast<real>(adjStorage.adjMu_);
    // Chain rule: Lame coefficient adjoints -> Young's modulus / Poisson ratio.
    adjointComputeMaterialParameters(
        settings.youngsModulus_, settings.poissonsRatio_, adjMu, adjLambda,
        adjVariablesOut.adjYoungsModulus_, adjVariablesOut.adjPoissonRatio_);
    // adj1. Collision Forces
    if (settings.enableCollision_) {
        adjointApplyCollisionForces(input, settings,
            prevState.displacements_, prevState.velocities_,
            adjStorage.adjForces_,
            adjPrevStateOut.adjDisplacements_, adjPrevStateOut.adjVelocities_,
            adjVariablesOut.adjGroundPlane_);
    }
    // Adjoint of body forces and mass
    adjVariablesOut.adjMass_ += static_cast<real>(
        precomputed.lumpedMass_.dot(adjStorage.adjMass_));
    real3 adjGravityTmp = static_cast<real3>(
        precomputed.bodyForces_.cwiseMul(adjStorage.adjForces_)
        .reduction<cuMat::functor::Sum<real3>, cuMat::Axis::All>(cuMat::functor::Sum<real3>(), make_real3(0, 0, 0)));
    adjVariablesOut.adjGravity_ += make_double3(-adjGravityTmp.x, -adjGravityTmp.y, -adjGravityTmp.z);
    // HACK: the gradient points into the wrong direction. Hence I added a minus here.
#if 0
    // Scale gradient
    // IMPORTANT: With the fix to adjointNewmarkComputeVelocity, I don't need that anymore!
    real dispNorm = static_cast<real>(adjPrevStateOut.adjDisplacements_.norm());
    double scale = dispNorm < 1e-10 ?
        1 : 1.0 / dispNorm;
    adjPrevStateOut.scale_ = scale * adjNextState.scale_;
    CI_LOG_I("Current scale: " << scale << ", total scale: " << adjPrevStateOut.scale_);
    adjPrevStateOut.adjDisplacements_ *= make_real3(static_cast<real>(scale));
    adjPrevStateOut.adjVelocities_ *= make_real3(static_cast<real>(scale));
    adjVariablesOut *= scale;
#endif
}

// Computes the gradient of the cost function with respect to the optimized
// variables using the adjoint method:
// forward simulation over all timesteps, cost-function evaluation, then
// backward (adjoint) steps in reverse time order.
// Returns the accumulated cost; the gradients are written to
// adjointVariablesOut. Returns 0 early if the worker was interrupted or if
// there is not enough free device memory.
real AdjointSolver::computeGradient(
    const Input& input, const SoftBodySimulation3D::Settings& settings_,
    const InputVariables& variables, CostFunctionPtr costFunction,
    AdjointVariables& adjointVariablesOut, bool memorySaving,
    BackgroundWorker2* worker, Statistics* statistics)
{
    adjointVariablesOut = { 0 };

    // update settings with the current state of the optimized variables
    SoftBodySimulation3D::Settings settings = settings_;
    if (variables.optimizeGravity_) settings.gravity_ = variables.currentGravity_;
    if (variables.optimizeYoungsModulus_) settings.youngsModulus_ = variables.currentYoungsModulus_;
    if (variables.optimizePoissonRatio_) settings.poissonsRatio_ = variables.currentPoissonRatio_;
    if (variables.optimizeMass_) settings.mass_ = variables.currentMass_;
    if (variables.optimizeMassDamping_) settings.dampingAlpha_ = variables.currentMassDamping_;
    if (variables.optimizeStiffnessDamping_) settings.dampingBeta_ = variables.currentStiffnessDamping_;
    if (variables.optimizeInitialLinearVelocity_) settings.initialLinearVelocity_ = variables.currentInitialLinearVelocity_;
    if (variables.optimizeInitialAngularVelocity_) settings.initialAngularVelocity_ = variables.currentInitialAngularVelocity_;
    if (variables.optimizeGroundPlane_) settings.groundPlane_ = variables.currentGroundPlane_;
    settings.validate();

    if (statistics) {
        statistics->numActiveNodes = input.numActiveNodes_;
        statistics->numEmptyNodes = input.grid_->getSize().prod() - input.numActiveNodes_;
        statistics->numActiveElements = input.numActiveCells_;
    }
    CudaTimer timer;

    // query number of timesteps
    int numSteps = costFunction->getNumSteps();
    CI_LOG_D(numSteps << " timesteps have to be computed");

    // first check if enough memory is available
    size_t freeMemory = cuMat::Context::getFreeDeviceMemory();
    size_t totalMemory = cuMat::Context::getTotalDeviceMemory();
    // Estimated allocation size: per-timestep states (+ storages when not in
    // memory saving mode) plus the optional full-grid displacement arrays.
    size_t requiredMemory = memorySaving
        ? (numSteps + 1) * (input.numActiveNodes_ * 2 * sizeof(real3))
            + (input.numActiveNodes_ * 6 * sizeof(real3))
            + 2 * input.sparsityPattern_.nnz * sizeof(real3x3)
            + (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements) ? 2 * input.grid_->getSize().prod() * sizeof(real3) : 0)
        : (numSteps + 1) * (input.numActiveNodes_ * 6 * sizeof(real3)
            + 2 * input.sparsityPattern_.nnz * sizeof(real3x3)
            + (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements) ? 2 * input.grid_->getSize().prod() * sizeof(real3) : 0));
    if (requiredMemory > freeMemory) {
        CI_LOG_E("Not enough memory! Free memory: " << (freeMemory >> 20) << "MB, required memory: " << (requiredMemory >> 20) << "MB (total: " << (totalMemory >> 20) << "MB)");
        if (!memorySaving) CI_LOG_E("consider enabling memory saving mode");
        return 0;
    }
    CI_LOG_I("Enough memory available. Free memory: " << (freeMemory >> 20) << "MB, required memory: " << (requiredMemory >> 20) << "MB (total: " << (totalMemory >> 20) << "MB)");

    // allocations: in memory saving mode only one forward storage and two
    // (ping-pong) backward states are kept.
    std::vector<ForwardState> forwardStates(numSteps + 1);
    std::vector<ForwardStorage> forwardStorages(memorySaving ? 1 : numSteps + 1);
    std::vector<BackwardState> backwardStates(memorySaving ? 2 : numSteps + 1);
    if (memorySaving) {
        for (int t = 0; t <= numSteps; ++t) forwardStates[t] = allocateForwardState(input);
        forwardStorages[0] = allocateForwardStorage(input);
        backwardStates[0] = allocateBackwardState(input, costFunction->getRequiredInput());
        backwardStates[1] = allocateBackwardState(input, costFunction->getRequiredInput());
    } else {
        for (int t = 0; t <= numSteps; ++t) {
            forwardStates[t] = allocateForwardState(input);
            if (t > 0) forwardStorages[t] = allocateForwardStorage(input);
            backwardStates[t] = allocateBackwardState(input, costFunction->getRequiredInput());
        }
    }
    BackwardStorage adjStorage = allocateBackwardStorage(input);

    // precomputations: unit mass matrix and body forces (mass=1, gravity=(1,1,1))
    // are scaled afterwards; this keeps the unary quantities available for the
    // adjoints of mass and gravity.
    CI_LOG_D("precompute values");
    PrecomputedValues precomputed = allocatePrecomputedValues(input);
    {
        SoftBodySimulation3D::Settings settings2 = settings;
        settings2.gravity_ = make_real3(1, 1, 1);
        settings2.mass_ = 1;
        adjStorage.unaryLumpedMass_.setZero();
        adjStorage.unaryBodyForces_.setZero();
        SoftBodyGrid3D::computeMassMatrix(input, settings2, adjStorage.unaryLumpedMass_);
        SoftBodyGrid3D::computeBodyForces(input, settings2, adjStorage.unaryBodyForces_);
    }
    precomputed.lumpedMass_ = settings.mass_ * adjStorage.unaryLumpedMass_;
    precomputed.bodyForces_ = settings.gravity_ * adjStorage.unaryBodyForces_;
    SoftBodyGrid3D::computeInitialVelocity(input, settings, precomputed.initialVelocity_);

    // apply initial velocity
    forwardStates[0].velocities_.inplace() = precomputed.initialVelocity_;

    // forward simulation
    CI_LOG_D("forward steps");
    for (int t=1; t<=numSteps; ++t) {
        if (worker && worker->isInterrupted()) return 0;
        CI_LOG_D("Timestep " << t);
        timer.start();
        bool converged = performForwardStep(
            forwardStates[t - 1], forwardStates[t],
            forwardStorages[memorySaving ? 0 : t],
            input, precomputed, settings, costFunction->getRequiredInput(), memorySaving);
        timer.stop();
        if (statistics) statistics->forwardTime.push_back(timer.duration());
#if FORWARD_BREAK_ON_DIVERGENCE>0
        if (!converged) {
            CI_LOG_E("Linear solver in the forward step did not converge, stop iteration and evaluate gradient only until timestep " << (t - 1));
            std::cout << "Linear solver in the forward step did not converge, stop iteration and evaluate gradient only until timestep " << (t - 1) << std::endl;
            numSteps = t - 1;
            break;
        }
#endif
    }

    // cost function (evaluated up front only when all states are stored)
    real finalCost = 0;
    CostFunctionTmp costFunctionTmp = allocateCostFunctionTmp(input, costFunction);
    if (!memorySaving) {
        CI_LOG_D("evaluate cost function");
        for (int t = 1; t <= numSteps; ++t) {
            if (worker && worker->isInterrupted()) return 0;
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t], backwardStates[t], input);
            timer.stop();
            if (statistics) statistics->costTime.push_back(timer.duration());
        }
    }

    // backward/adjoint passes in reverse time order
    CI_LOG_D("adjoint steps");
    for (int t=numSteps; t>0; --t) {
        if (worker && worker->isInterrupted()) return 0;
        CI_LOG_I("Timestep " << t);
        if (!memorySaving) {
            timer.start();
            performBackwardStep(
                forwardStates[t - 1], forwardStates[t],
                backwardStates[t], backwardStates[t - 1],
                adjointVariablesOut, input, precomputed,
                forwardStorages[t], adjStorage, settings,
                costFunction->getRequiredInput(), false);
            timer.stop();
            if (statistics) statistics->backwardTime.push_back(timer.duration());
        } else {
            // ping-pong between the two backward states; the cost function is
            // evaluated lazily per timestep here.
            const int idxCurrent = t % 2;
            const int idxNext = 1 - idxCurrent;
            backwardStates[idxNext].reset();
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t], backwardStates[idxCurrent], input);
            timer.stop();
            if (statistics) statistics->costTime.push_back(timer.duration());
            timer.start();
            performBackwardStep(
                forwardStates[t - 1], forwardStates[t],
                backwardStates[idxCurrent], backwardStates[idxNext],
                adjointVariablesOut, input, precomputed,
                forwardStorages[0], adjStorage, settings,
                costFunction->getRequiredInput(), true);
            timer.stop();
            if (statistics) statistics->backwardTime.push_back(timer.duration());
        }
    }
    // undo any gradient scaling accumulated in the backward states
    adjointVariablesOut *= 1.0 / backwardStates[0].scale_;

    // adjoint of precomputations (initial velocities)
    if (variables.optimizeInitialLinearVelocity_ || variables.optimizeInitialAngularVelocity_)
        adjointComputeInitialVelocity(input,
            settings.initialLinearVelocity_, settings.initialAngularVelocity_,
            backwardStates[0].adjVelocities_,
            adjointVariablesOut.adjInitialLinearVelocity,
            adjointVariablesOut.adjInitialAngularVelocity);

    // done
    return finalCost;
}

// Computes the gradient of the cost function by forward finite differences:
// one full forward simulation at the current settings plus one additional
// simulation per optimized scalar parameter, each perturbed by
// finiteDifferencesDelta (relative to the reference settings).
// Intended as a validation/reference for computeGradient. Returns the cost at
// the unperturbed settings; gradients go to adjointVariablesOut.
// NOTE: only gravity, damping, Poisson ratio and Young's modulus are
// differentiated here (see the EVALUATE_FD invocations below).
real AdjointSolver::computeGradientFiniteDifferences(
    const Input& input, const SoftBodySimulation3D::Settings& settings_,
    const InputVariables& variables, CostFunctionPtr costFunction,
    AdjointVariables& adjointVariablesOut, real finiteDifferencesDelta,
    BackgroundWorker2* worker, Statistics* statistics)
{
    adjointVariablesOut = { 0 };

    // update settings with the current state of the optimized variables
    SoftBodySimulation3D::Settings settings = settings_;
    if (variables.optimizeGravity_) settings.gravity_ = variables.currentGravity_;
    if (variables.optimizeYoungsModulus_) settings.youngsModulus_ = variables.currentYoungsModulus_;
    if (variables.optimizePoissonRatio_) settings.poissonsRatio_ = variables.currentPoissonRatio_;
    if (variables.optimizeMass_) settings.mass_ = variables.currentMass_;
    if (variables.optimizeMassDamping_) settings.dampingAlpha_ = variables.currentMassDamping_;
    if (variables.optimizeStiffnessDamping_) settings.dampingBeta_ = variables.currentStiffnessDamping_;
    if (variables.optimizeInitialLinearVelocity_) settings.initialLinearVelocity_ = variables.currentInitialLinearVelocity_;
    if (variables.optimizeInitialAngularVelocity_) settings.initialAngularVelocity_ = variables.currentInitialAngularVelocity_;
    if (variables.optimizeGroundPlane_) settings.groundPlane_ = variables.currentGroundPlane_;
    settings.validate();

    if (statistics) {
        statistics->numActiveNodes = input.numActiveNodes_;
        statistics->numEmptyNodes = input.grid_->getSize().prod() - input.numActiveNodes_;
        statistics->numActiveElements = input.numActiveCells_;
    }
    CudaTimer timer;

    // query number of timesteps
    int numSteps = costFunction->getNumSteps();
    CI_LOG_D(numSteps << " timesteps have to be computed");

    // allocations: only two ping-pong forward states are needed since no
    // backward pass follows.
    ForwardState forwardStates[2];
    forwardStates[0] = allocateForwardState(input);
    forwardStates[1] = allocateForwardState(input);
    ForwardStorage forwardStorage = allocateForwardStorage(input);
    PrecomputedValues precomputed = allocatePrecomputedValues(input);
    BackwardState backwardState = allocateBackwardState(input, costFunction->getRequiredInput());

    // single forward evaluation: runs the full simulation with the given
    // settings and returns the accumulated cost.
    auto evaluate = [&](SoftBodySimulation3D::Settings settings2, Statistics* statistics2) -> real {
        settings2.validate();
        // precomputations + initial computations
        precomputed.lumpedMass_.setZero();
        precomputed.bodyForces_.setZero();
        precomputed.initialVelocity_.setZero();
        for (int i = 0; i < 2; ++i) {
            forwardStates[i].displacements_.setZero();
            forwardStates[i].gridDisplacements_.setZero();
            forwardStates[i].velocities_.setZero();
        }
        SoftBodyGrid3D::computeMassMatrix(input, settings2, precomputed.lumpedMass_);
        SoftBodyGrid3D::computeBodyForces(input, settings2, precomputed.bodyForces_);
        SoftBodyGrid3D::computeInitialVelocity(input, settings2, precomputed.initialVelocity_);
        forwardStates[0].velocities_.inplace() = precomputed.initialVelocity_;
        // forward + cost function
        real finalCost = 0;
        CostFunctionTmp costFunctionTmp = allocateCostFunctionTmp(input, costFunction);
        CI_LOG_D("forward steps + cost function");
        for (int t = 1; t <= numSteps; ++t) {
            if (worker && worker->isInterrupted()) return 0;
            CI_LOG_D("Timestep " << t);
            timer.start();
            bool converged = performForwardStep(
                forwardStates[(t-1)%2], forwardStates[t%2], forwardStorage,
                input, precomputed, settings2,
                costFunction->getRequiredInput(), true);
            timer.stop();
            if (statistics2) statistics2->forwardTime.push_back(timer.duration());
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t % 2], backwardState, input);
            timer.stop();
            if (statistics2) statistics2->costTime.push_back(timer.duration());
        }
        return finalCost;
    };

    // evaluate current setting (reference cost)
    CI_LOG_D("main evaluation");
    real finalCost = evaluate(settings, statistics);

    // evaluate once for every parameter: forward difference
    // grad = (cost(x + h) - cost(x)) / h, with h relative to the reference
    // setting value.
#define EVALUATE_FD(optimVar, settingsVar, currentVar, adjVar) \
    if (variables.optimVar) { \
        CI_LOG_D("evaluate " CUMAT_STR(settingsVar)); \
        SoftBodySimulation3D::Settings settings2 = settings; \
        settings2.settingsVar = variables.currentVar + settings_.settingsVar * finiteDifferencesDelta; \
        real cost = evaluate(settings2, nullptr); \
        adjointVariablesOut. adjVar = (cost - finalCost) / (settings_.settingsVar * finiteDifferencesDelta); \
        std::cout << "Evaluate " << #optimVar << \
            ", x1=" << settings.settingsVar << ", x2=" << settings2.settingsVar << \
            ", c1=" << finalCost << ", c2=" << cost << \
            " -> grad=" << adjointVariablesOut.adjVar << std::endl; \
    }
    EVALUATE_FD(optimizeGravity_, gravity_.x, currentGravity_.x, adjGravity_.x);
    EVALUATE_FD(optimizeGravity_, gravity_.y, currentGravity_.y, adjGravity_.y);
    EVALUATE_FD(optimizeGravity_, gravity_.z, currentGravity_.z, adjGravity_.z);
    EVALUATE_FD(optimizeMassDamping_, dampingAlpha_, currentMassDamping_, adjMassDamping_);
    EVALUATE_FD(optimizeStiffnessDamping_, dampingBeta_, currentStiffnessDamping_, adjStiffnessDamping_);
    EVALUATE_FD(optimizePoissonRatio_, poissonsRatio_, currentPoissonRatio_, adjPoissonRatio_);
    //EVALUATE_FD(optimizeYoungsModulus_, youngsModulus_, currentYoungsModulus_, adjYoungsModulus_);
    // Young's modulus is expanded manually (same pattern as the macro above).
    if (variables.optimizeYoungsModulus_) {
        CI_LOG_D("evaluate " CUMAT_STR(youngsModulus_));
        SoftBodySimulation3D::Settings settings2 = settings;
        settings2.youngsModulus_ = variables.currentYoungsModulus_ + settings_.youngsModulus_ * finiteDifferencesDelta;
        real cost = evaluate(settings2, nullptr);
adjointVariablesOut.adjYoungsModulus_ = (cost - finalCost) / (settings_.youngsModulus_ * finiteDifferencesDelta); std::cout << "Evaluate " << "optimizeYoungsModulus_" << ", x1=" << settings.youngsModulus_ << ", x2=" << settings2.youngsModulus_ << ", c1=" << finalCost << ", c2=" << cost << " -> grad=" << adjointVariablesOut.adjYoungsModulus_ << std::endl; }; #undef EVALUATE_FD //done return finalCost; } void AdjointSolver::adjointComputeMaterialParameters( double k, double p, double adjMu, double adjLambda, double& adjYoungOut, double& adjPoissonOut) { adjYoungOut += adjMu * (1 / (2.*(1 + p))) + adjLambda * (p / ((1 - 2 * p)*(1 + p))); adjPoissonOut += adjMu * (-k / (2.*ar3d::utils::square(1 + p))) + adjLambda * (-((k*p) / ((1 - 2 * p)*ar3d::utils::square(1 + p))) + k / ((1 - 2 * p)*(1 + p)) + (2 * k*p) / (ar3d::utils::square(1 - 2 * p)*(1 + p))); } AdjointSolver::Settings::Settings() : numIterations_(20), optimizer_(GRADIENT_DESCENT), memorySaving_(false), normalizeUnits_(true) { gradientDescentSettings_.epsilon_ = "1e-7"; gradientDescentSettings_.linearStepsize_ = "0.001"; gradientDescentSettings_.maxStepsize_ = ""; gradientDescentSettings_.minStepsize_ = ""; rpropSettings_.epsilon_ = "1e-7"; rpropSettings_.initialStepsize_ = "0.001"; lbfgsSettings_.epsilon_ = "1e-7"; lbfgsSettings_.past_ = 0; lbfgsSettings_.delta_ = ""; lbfgsSettings_.lineSearchAlg_ = LbfgsSettings::Wolfe; lbfgsSettings_.linesearchMaxTrials_ = 2; lbfgsSettings_.linesearchMinStep_ = ""; lbfgsSettings_.linesearchMaxStep_ = ""; } AdjointSolver::GUI::GUI() { } void AdjointSolver::GUI::initParams(cinder::params::InterfaceGlRef params, const std::string& group, const bool noInitialValues) { params_ = params; static const std::string t = "visible=true"; static const std::string f = "visible=false"; //GENERAL PARAMETERS params->addParam("AdjointSolver-NumIterations", &settings_.numIterations_) .group(group).label("Num Iterations").min(1); params->addParam("AdjointSolver-MemorySaving", 
        &settings_.memorySaving_)
        .group(group).label("Memory Saving")
        .optionsStr("help='False: save everything from the forward pass (fast, memory intense). True: only save minimal information, recompute more (slower, less memory)'");
    params->addParam("AdjointSolver-NormalizeUnits", &settings_.normalizeUnits_)
        .group(group).label("Normalize Units");
    std::vector<std::string> optimizerNames = { "Gradient Descent", "Rprop", "LBFGS" };
    // When the optimizer changes, show only the settings of the selected one.
    params->addParam("AdjointSolver-Optimizer", optimizerNames, reinterpret_cast<int*>(&settings_.optimizer_))
        .group(group).label("Optimizer").updateFn([params, this]()
    {
        bool v = settings_.optimizer_ == Settings::GRADIENT_DESCENT;
        params->setOptions("AdjointSolver-GD-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-GD-LinearStepsize", v ? t : f);
        params->setOptions("AdjointSolver-GD-MaxStepsize", v ? t : f);
        params->setOptions("AdjointSolver-GD-MinStepsize", v ? t : f);
        v = settings_.optimizer_ == Settings::RPROP;
        params->setOptions("AdjointSolver-Rprop-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-Rprop-InitialStepsize", v ? t : f);
        v = settings_.optimizer_ == Settings::LBFGS;
        params->setOptions("AdjointSolver-LBFGS-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Past", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Delta", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Algorithm", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMaxTrials", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMinStep", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMaxStep", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchTol", v ? t : f);
    });

    // OPTIMIZED VARIABLES
    // Each "Optimize X" checkbox toggles the visibility of its initial-value
    // widget(s) via the accessors setter.
    params->addParam("AdjointSolver-OptimizeGravity", &settings_.variables_.optimizeGravity_)
        .group(group).label("Optimize Gravity").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeGravity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialGravityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialGravityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialGravityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeGravity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialGravityX", &settings_.variables_.currentGravity_.x)
            .group(group).label("Initial Gravity X").step(0.01f).visible(settings_.variables_.optimizeGravity_);
        params->addParam("AdjointSolver-InitialGravityY", &settings_.variables_.currentGravity_.y)
            .group(group).label("Initial Gravity Y").step(0.01f).visible(settings_.variables_.optimizeGravity_);
        params->addParam("AdjointSolver-InitialGravityZ", &settings_.variables_.currentGravity_.z)
            .group(group).label("Initial Gravity Z").step(0.01f).visible(settings_.variables_.optimizeGravity_);
    }
    params->addParam("AdjointSolver-OptimizeYoungsModulus", &settings_.variables_.optimizeYoungsModulus_)
        .group(group).label("Optimize Young's Modulus").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeYoungsModulus_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialYoungsModulus", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeYoungsModulus_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialYoungsModulus", &settings_.variables_.currentYoungsModulus_)
            .group(group).label("Initial Young's Modulus").step(0.01f).min(0).visible(settings_.variables_.optimizeYoungsModulus_);
    }
    params->addParam("AdjointSolver-OptimizePoissonRatio", &settings_.variables_.optimizePoissonRatio_)
        .group(group).label("Optimize Poisson Ratio").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizePoissonRatio_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialPoissonRatio", v ? t : f);
    }, [this]() { return settings_.variables_.optimizePoissonRatio_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialPoissonRatio", &settings_.variables_.currentPoissonRatio_)
            .group(group).label("Initial Poisson Ratio").step(0.001f).min(0.1f).max(0.49f).visible(settings_.variables_.optimizePoissonRatio_);
    }
    params->addParam("AdjointSolver-OptimizeMass", &settings_.variables_.optimizeMass_)
        .group(group).label("Optimize Mass").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeMass_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialMass", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeMass_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialMass", &settings_.variables_.currentMass_)
            .group(group).label("Initial Mass").step(0.01f).min(0.01f).visible(settings_.variables_.optimizeMass_);
    }
    params->addParam("AdjointSolver-OptimizeMassDamping", &settings_.variables_.optimizeMassDamping_)
        .group(group).label("Optimize Mass Damping").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeMassDamping_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialMassDamping", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeMassDamping_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialMassDamping", &settings_.variables_.currentMassDamping_)
            .group(group).label("Initial Mass Damping").step(0.001f).min(0.0f).visible(settings_.variables_.optimizeMassDamping_);
    }
    params->addParam("AdjointSolver-OptimizeStiffnessDamping", &settings_.variables_.optimizeStiffnessDamping_)
        .group(group).label("Optimize Stiffness Damping").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeStiffnessDamping_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialStiffnessDamping", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeStiffnessDamping_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialStiffnessDamping", &settings_.variables_.currentStiffnessDamping_)
            .group(group).label("Initial Stiffness Damping").step(0.001f).min(0.0f).visible(settings_.variables_.optimizeStiffnessDamping_);
    }
    params->addParam("AdjointSolver-OptimizeInitialLinearVelocity", &settings_.variables_.optimizeInitialLinearVelocity_)
        .group(group).label("Optimize Initial Linear Velocity").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeInitialLinearVelocity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialLinearVelocityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialLinearVelocityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialLinearVelocityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeInitialLinearVelocity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialLinearVelocityX", &settings_.variables_.currentInitialLinearVelocity_.x)
            .group(group).label("Initial Linear Velocity X").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
        params->addParam("AdjointSolver-InitialLinearVelocityY", &settings_.variables_.currentInitialLinearVelocity_.y)
            .group(group).label("Initial Linear Velocity Y").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
        params->addParam("AdjointSolver-InitialLinearVelocityZ", &settings_.variables_.currentInitialLinearVelocity_.z)
            .group(group).label("Initial Linear Velocity Z").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
    }
    params->addParam("AdjointSolver-OptimizeInitialAngularVelocity", &settings_.variables_.optimizeInitialAngularVelocity_)
        .group(group).label("Optimize Initial Angular Velocity").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeInitialAngularVelocity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialAngularVelocityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialAngularVelocityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialAngularVelocityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeInitialAngularVelocity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialAngularVelocityX", &settings_.variables_.currentInitialAngularVelocity_.x)
            .group(group).label("Initial Angular Velocity X").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
        params->addParam("AdjointSolver-InitialAngularVelocityY", &settings_.variables_.currentInitialAngularVelocity_.y)
            .group(group).label("Initial Angular Velocity Y").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
        params->addParam("AdjointSolver-InitialAngularVelocityZ", &settings_.variables_.currentInitialAngularVelocity_.z)
            .group(group).label("Initial Angular Velocity Z").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
    }
    params->addParam("AdjointSolver-OptimizeGroundPlane", &settings_.variables_.optimizeGroundPlane_)
        .group(group).label("Optimize Ground Plane").accessors(
            [params, this, noInitialValues](bool v)
    {
        settings_.variables_.optimizeGroundPlane_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialGroundPlaneAngle", v ? t : f);
            params->setOptions("AdjointSolver-InitialGroundPlaneHeight", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeGroundPlane_; });
    if (!noInitialValues) {
        // The plane normal (xyz of the real4) is exposed as a direction widget.
        params->addParam("AdjointSolver-InitialGroundPlaneAngle", reinterpret_cast<glm::tvec3<real, glm::highp>*>(&settings_.variables_.currentGroundPlane_.x))
            .group(group).label("Initial Ground Plane Angle").visible(settings_.variables_.optimizeGroundPlane_);
        params->addParam("AdjointSolver-InitialGroundPlaneHeight", &settings_.variables_.currentGroundPlane_.w)
            .group(group).label("Initial Ground Plane Height").step(0.01f).visible(settings_.variables_.optimizeGroundPlane_);
    }

    // OPTIMIZER SETTINGS
    params->addParam("AdjointSolver-GD-Epsilon", &settings_.gradientDescentSettings_.epsilon_)
        .group(group).label("GD: Epsilon").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-GD-LinearStepsize", &settings_.gradientDescentSettings_.linearStepsize_)
        .group(group).label("GD: Initial").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Initial step size. Leave empty for default value.'");
    params->addParam("AdjointSolver-GD-MaxStepsize", &settings_.gradientDescentSettings_.maxStepsize_)
        .group(group).label("GD: Max Stepsize").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Maximal step size. If empty, no restriction is applied'");
    params->addParam("AdjointSolver-GD-MinStepsize", &settings_.gradientDescentSettings_.minStepsize_)
        .group(group).label("GD: Min Stepsize").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Minimal step size. If empty, no restriction is applied'");
    params->addParam("AdjointSolver-Rprop-Epsilon", &settings_.rpropSettings_.epsilon_)
        .group(group).label("Rprop: Epsilon").visible(settings_.optimizer_ == Settings::RPROP)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-Rprop-InitialStepsize", &settings_.rpropSettings_.initialStepsize_)
        .group(group).label("Rprop: Initial").visible(settings_.optimizer_ == Settings::RPROP)
        .optionsStr("help='Initial step size. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-Epsilon", &settings_.lbfgsSettings_.epsilon_)
        .group(group).label("LBFGS: Epsilon").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-Past", &settings_.lbfgsSettings_.past_).min(0)
        .group(group).label("LBFGS: Past Distance").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Number of steps into the past for tests if the cost function reached a plateau. Set to zero to disable.'");
    params->addParam("AdjointSolver-LBFGS-Delta", &settings_.lbfgsSettings_.delta_)
        .group(group).label("LBFGS: Past Delta").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Tolerance for plateau termination criterion'");
    std::vector<std::string> lbfgsLinesearchAlgs = { "Armijo", "Wolfe", "StrongWolfe" };
    params->addParam("AdjointSolver-LBFGS-Algorithm", lbfgsLinesearchAlgs, reinterpret_cast<int*>(&settings_.lbfgsSettings_.lineSearchAlg_))
        .group(group).label("LBFGS: LS Algorithm").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='The linesearch algorithm used to find the best step size'");
    params->addParam("AdjointSolver-LBFGS-LinesearchMaxTrials", &settings_.lbfgsSettings_.linesearchMaxTrials_)
        .group(group).label("LBFGS: LS max trials").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='The maximal number of trials in the line search'");
    params->addParam("AdjointSolver-LBFGS-LinesearchMinStep", &settings_.lbfgsSettings_.linesearchMinStep_)
        .group(group).label("LBFGS: LS min step").visible(settings_.optimizer_ == Settings::LBFGS)
.optionsStr("help='Minimal step size in the line search step. Leave empty for default value.'"); params->addParam("AdjointSolver-LBFGS-LinesearchMaxStep", &settings_.lbfgsSettings_.linesearchMaxStep_) .group(group).label("LBFGS: LS max step").visible(settings_.optimizer_ == Settings::LBFGS) .optionsStr("help='Maximal step size in the line search step. Leave empty for default value.'"); params->addParam("AdjointSolver-LBFGS-LinesearchTol", &settings_.lbfgsSettings_.linesearchTol_) .group(group).label("LBFGS: LS tolerance").visible(settings_.optimizer_ == Settings::LBFGS) .optionsStr("help='Tolerance in Armijo condition. Leave empty for default value.'"); } void AdjointSolver::GUI::load(const cinder::JsonTree& parent, bool noInitialValues) { settings_.numIterations_ = parent.getValueForKey<int>("NumIterations"); settings_.optimizer_ = Settings::ToOptimizer(parent.getValueForKey("Optimizer")); if (parent.hasChild("MemorySaving")) settings_.memorySaving_ = parent.getValueForKey<bool>("MemorySaving"); if (parent.hasChild("NormalizeUnits")) settings_.normalizeUnits_ = parent.getValueForKey<bool>("NormalizeUnits"); const cinder::JsonTree& gd = parent.getChild("GradientDescent"); settings_.gradientDescentSettings_.epsilon_ = gd.getValueForKey("Epsilon"); settings_.gradientDescentSettings_.linearStepsize_ = gd.getValueForKey("LinearStepsize"); settings_.gradientDescentSettings_.maxStepsize_ = gd.getValueForKey("MaxStepsize"); settings_.gradientDescentSettings_.minStepsize_ = gd.getValueForKey("MinStepsize"); if (parent.hasChild("Rprop")) { const cinder::JsonTree& gd = parent.getChild("Rprop"); settings_.rpropSettings_.epsilon_ = gd.getValueForKey("Epsilon"); settings_.rpropSettings_.initialStepsize_ = gd.getValueForKey("InitialStepsize"); } const cinder::JsonTree& lbfgs = parent.getChild("LBFGS"); settings_.lbfgsSettings_.epsilon_ = lbfgs.getValueForKey("Epsilon"); settings_.lbfgsSettings_.past_ = lbfgs.getValueForKey<int>("Past"); settings_.lbfgsSettings_.delta_ = 
// GUI::load (cont.): deserialize the remaining LBFGS line-search settings, then the
// "InitialValues" subtree. Step-size/tolerance settings are kept as raw strings here;
// they are parsed to doubles later (empty string = use the library default).
lbfgs.getValueForKey("Delta"); settings_.lbfgsSettings_.lineSearchAlg_ = Settings::LbfgsSettings::ToLineSearchAlg(lbfgs.getValueForKey("LineSearchAlg")); settings_.lbfgsSettings_.linesearchMaxTrials_ = lbfgs.getValueForKey<int>("LineSearchMaxTrials"); settings_.lbfgsSettings_.linesearchMinStep_ = lbfgs.getValueForKey("LineSearchMinStep"); settings_.lbfgsSettings_.linesearchMaxStep_ = lbfgs.getValueForKey("LineSearchMaxStep"); settings_.lbfgsSettings_.linesearchTol_ = lbfgs.getValueForKey("LineSearchTol"); const cinder::JsonTree& input = parent.getChild("InitialValues"); settings_.variables_.optimizeGravity_ = input.getValueForKey<bool>("OptimizeGravity"); if (!noInitialValues) { settings_.variables_.currentGravity_.x = input.getChild("InitialGravity").getValueAtIndex<real>(0); settings_.variables_.currentGravity_.y = input.getChild("InitialGravity").getValueAtIndex<real>(1); settings_.variables_.currentGravity_.z = input.getChild("InitialGravity").getValueAtIndex<real>(2); } settings_.variables_.optimizeYoungsModulus_ = input.getValueForKey<bool>("OptimizeYoungsModulus"); if (!noInitialValues) settings_.variables_.currentYoungsModulus_ = input.getValueForKey<real>("InitialYoungsModulus"); settings_.variables_.optimizePoissonRatio_ = input.getValueForKey<bool>("OptimizePoissonRatio"); if (!noInitialValues) settings_.variables_.currentPoissonRatio_ = input.getValueForKey<real>("InitialPoissonRatio"); settings_.variables_.optimizeMass_ = input.getValueForKey<bool>("OptimizeMass"); if (!noInitialValues) settings_.variables_.currentMass_ = input.getValueForKey<real>("InitialMass"); settings_.variables_.optimizeMassDamping_ = input.getValueForKey<bool>("OptimizeMassDamping"); if (!noInitialValues) settings_.variables_.currentMassDamping_ = input.getValueForKey<real>("InitialMassDamping"); settings_.variables_.optimizeStiffnessDamping_ = input.getValueForKey<bool>("OptimizeStiffnessDamping"); if (!noInitialValues) settings_.variables_.currentStiffnessDamping_ =
// Initial linear/angular velocity entries are guarded with hasChild() — presumably so
// older JSON files without these keys still load; TODO confirm. Ground-plane entries
// ("InitialGroundPlane") are read unguarded as a 4-vector (normal xyz + height w).
input.getValueForKey<real>("InitialStiffnessDamping"); if (input.hasChild("OptimizeInitialLinearVelocity")) settings_.variables_.optimizeInitialLinearVelocity_ = input.getValueForKey<bool>("OptimizeInitialLinearVelocity"); if (!noInitialValues && input.hasChild("InitialLinearVelocity")) { settings_.variables_.currentInitialLinearVelocity_.x = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(0); settings_.variables_.currentInitialLinearVelocity_.y = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(1); settings_.variables_.currentInitialLinearVelocity_.z = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(2); } if (input.hasChild("OptimizeInitialAngularVelocity")) settings_.variables_.optimizeInitialAngularVelocity_ = input.getValueForKey<bool>("OptimizeInitialAngularVelocity"); if (!noInitialValues && input.hasChild("InitialAngularVelocity")) { settings_.variables_.currentInitialAngularVelocity_.x = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(0); settings_.variables_.currentInitialAngularVelocity_.y = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(1); settings_.variables_.currentInitialAngularVelocity_.z = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(2); } settings_.variables_.optimizeGroundPlane_ = input.getValueForKey<bool>("OptimizeGroundPlane"); if (!noInitialValues) { settings_.variables_.currentGroundPlane_.x = input.getChild("InitialGroundPlane").getValueAtIndex<real>(0); settings_.variables_.currentGroundPlane_.y = input.getChild("InitialGroundPlane").getValueAtIndex<real>(1); settings_.variables_.currentGroundPlane_.z = input.getChild("InitialGroundPlane").getValueAtIndex<real>(2); settings_.variables_.currentGroundPlane_.w = input.getChild("InitialGroundPlane").getValueAtIndex<real>(3); } if (params_) { static const std::string t = "visible=true"; static const std::string f = "visible=false"; bool v = settings_.optimizer_ == Settings::GRADIENT_DESCENT;
// If a GUI parameter panel is attached, sync widget visibility with the freshly loaded
// state: optimizer-specific widgets follow the selected optimizer; initial-value widgets
// follow their per-variable optimize flags.
params_->setOptions("AdjointSolver-GD-Epsilon", v ? t : f); params_->setOptions("AdjointSolver-GD-LinearStepsize", v ? t : f); params_->setOptions("AdjointSolver-GD-MaxStepsize", v ? t : f); params_->setOptions("AdjointSolver-GD-MinStepsize", v ? t : f); v = settings_.optimizer_ == Settings::LBFGS; params_->setOptions("AdjointSolver-LBFGS-Epsilon", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-Past", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-Delta", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-Algorithm", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-LinesearchMaxTrials", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-LinesearchMinStep", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-LinesearchMaxStep", v ? t : f); params_->setOptions("AdjointSolver-LBFGS-LinesearchTol", v ? t : f); if (!noInitialValues) { params_->setOptions("AdjointSolver-InitialGravityX", settings_.variables_.optimizeGravity_ ? t : f); params_->setOptions("AdjointSolver-InitialGravityY", settings_.variables_.optimizeGravity_ ? t : f); params_->setOptions("AdjointSolver-InitialGravityZ", settings_.variables_.optimizeGravity_ ? t : f); params_->setOptions("AdjointSolver-InitialYoungsModulus", settings_.variables_.optimizeYoungsModulus_ ? t : f); params_->setOptions("AdjointSolver-InitialPoissonRatio", settings_.variables_.optimizePoissonRatio_ ? t : f); params_->setOptions("AdjointSolver-InitialMass", settings_.variables_.optimizeMass_ ? t : f); params_->setOptions("AdjointSolver-InitialMassDamping", settings_.variables_.optimizeMassDamping_ ? t : f); params_->setOptions("AdjointSolver-InitialStiffnessDamping", settings_.variables_.optimizeStiffnessDamping_ ? t : f); params_->setOptions("AdjointSolver-InitialLinearVelocityX", settings_.variables_.optimizeInitialLinearVelocity_ ? t : f); params_->setOptions("AdjointSolver-InitialLinearVelocityY", settings_.variables_.optimizeInitialLinearVelocity_ ?
t : f); params_->setOptions("AdjointSolver-InitialLinearVelocityZ", settings_.variables_.optimizeInitialLinearVelocity_ ? t : f); params_->setOptions("AdjointSolver-InitialAngularVelocityX", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f); params_->setOptions("AdjointSolver-InitialAngularVelocityY", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f); params_->setOptions("AdjointSolver-InitialAngularVelocityZ", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f); params_->setOptions("AdjointSolver-InitialGroundPlaneAngle", settings_.variables_.optimizeGroundPlane_ ? t : f); params_->setOptions("AdjointSolver-InitialGroundPlaneHeight", settings_.variables_.optimizeGroundPlane_ ? t : f); } } } void AdjointSolver::GUI::save(cinder::JsonTree& parent, bool noInitialValues) const { parent.addChild(cinder::JsonTree("NumIterations", settings_.numIterations_)); parent.addChild(cinder::JsonTree("Optimizer", Settings::FromOptimizer(settings_.optimizer_))); parent.addChild(cinder::JsonTree("MemorySaving", settings_.memorySaving_)); parent.addChild(cinder::JsonTree("NormalizeUnits", settings_.normalizeUnits_)); cinder::JsonTree gd = cinder::JsonTree::makeObject("GradientDescent"); gd.addChild(cinder::JsonTree("Epsilon", settings_.gradientDescentSettings_.epsilon_)); gd.addChild(cinder::JsonTree("LinearStepsize", settings_.gradientDescentSettings_.linearStepsize_)); gd.addChild(cinder::JsonTree("MaxStepsize", settings_.gradientDescentSettings_.maxStepsize_)); gd.addChild(cinder::JsonTree("MinStepsize", settings_.gradientDescentSettings_.minStepsize_)); parent.addChild(gd); cinder::JsonTree rprop = cinder::JsonTree::makeObject("Rprop"); rprop.addChild(cinder::JsonTree("Epsilon", settings_.rpropSettings_.epsilon_)); rprop.addChild(cinder::JsonTree("InitialStepsize", settings_.rpropSettings_.initialStepsize_)); parent.addChild(rprop); cinder::JsonTree lbfgs = cinder::JsonTree::makeObject("LBFGS"); lbfgs.addChild(cinder::JsonTree("Epsilon", 
settings_.lbfgsSettings_.epsilon_)); lbfgs.addChild(cinder::JsonTree("Past", settings_.lbfgsSettings_.past_)); lbfgs.addChild(cinder::JsonTree("Delta", settings_.lbfgsSettings_.delta_)); lbfgs.addChild(cinder::JsonTree("LineSearchAlg", Settings::LbfgsSettings::FromLineSearchAlg(settings_.lbfgsSettings_.lineSearchAlg_))); lbfgs.addChild(cinder::JsonTree("LineSearchMaxTrials", settings_.lbfgsSettings_.linesearchMaxTrials_)); lbfgs.addChild(cinder::JsonTree("LineSearchMinStep", settings_.lbfgsSettings_.linesearchMinStep_)); lbfgs.addChild(cinder::JsonTree("LineSearchMaxStep", settings_.lbfgsSettings_.linesearchMaxStep_)); lbfgs.addChild(cinder::JsonTree("LineSearchTol", settings_.lbfgsSettings_.linesearchTol_)); parent.addChild(lbfgs); cinder::JsonTree input = cinder::JsonTree::makeObject("InitialValues"); input.addChild(cinder::JsonTree("OptimizeGravity", settings_.variables_.optimizeGravity_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialGravity") .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.z))); input.addChild(cinder::JsonTree("OptimizeYoungsModulus", settings_.variables_.optimizeYoungsModulus_)); if (!noInitialValues) input.addChild(cinder::JsonTree("InitialYoungsModulus", settings_.variables_.currentYoungsModulus_)); input.addChild(cinder::JsonTree("OptimizePoissonRatio", settings_.variables_.optimizePoissonRatio_)); if (!noInitialValues) input.addChild(cinder::JsonTree("InitialPoissonRatio", settings_.variables_.currentPoissonRatio_)); input.addChild(cinder::JsonTree("OptimizeMass", settings_.variables_.optimizeMass_)); if (!noInitialValues) input.addChild(cinder::JsonTree("InitialMass", settings_.variables_.currentMass_)); input.addChild(cinder::JsonTree("OptimizeMassDamping", settings_.variables_.optimizeMassDamping_)); if (!noInitialValues) 
input.addChild(cinder::JsonTree("InitialMassDamping", settings_.variables_.currentMassDamping_)); input.addChild(cinder::JsonTree("OptimizeStiffnessDamping", settings_.variables_.optimizeStiffnessDamping_)); if (!noInitialValues) input.addChild(cinder::JsonTree("InitialStiffnessDamping", settings_.variables_.currentStiffnessDamping_)); input.addChild(cinder::JsonTree("OptimizeInitialLinearVelocity", settings_.variables_.optimizeInitialLinearVelocity_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialLinearVelocity") .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.z))); input.addChild(cinder::JsonTree("OptimizeInitialAngularVelocity", settings_.variables_.optimizeInitialAngularVelocity_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialAngularVelocity") .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.z))); input.addChild(cinder::JsonTree("OptimizeGroundPlane", settings_.variables_.optimizeGroundPlane_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialGroundPlane") .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.z)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.w))); parent.addChild(input); } AdjointSolver::AdjointSolver(SimulationResults3DPtr reference, const Settings& settings, CostFunctionPtr costFunction) : reference_(reference) , settings_(settings) , costFunction_(costFunction) { 
reference_->input_.assertSizes(); reference_->settings_.validate(); } bool AdjointSolver::solve(const Callback_t& callback, BackgroundWorker2* worker) { //helper functions typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vec; const static auto packInputVariables = [](const InputVariables& var) -> Vec { std::vector<double> params; if (var.optimizeGravity_) { params.push_back(var.currentGravity_.x); params.push_back(var.currentGravity_.y); params.push_back(var.currentGravity_.z); } if (var.optimizeYoungsModulus_) params.push_back(var.currentYoungsModulus_); if (var.optimizePoissonRatio_) params.push_back(var.currentPoissonRatio_); if (var.optimizeMass_) params.push_back(var.currentMass_); if (var.optimizeMassDamping_) params.push_back(var.currentMassDamping_); if (var.optimizeStiffnessDamping_) params.push_back(var.currentStiffnessDamping_); if (var.optimizeInitialLinearVelocity_) { params.push_back(var.currentInitialLinearVelocity_.x); params.push_back(var.currentInitialLinearVelocity_.y); params.push_back(var.currentInitialLinearVelocity_.z); } if (var.optimizeInitialAngularVelocity_) { params.push_back(var.currentInitialAngularVelocity_.x); params.push_back(var.currentInitialAngularVelocity_.y); params.push_back(var.currentInitialAngularVelocity_.z); } if (var.optimizeGroundPlane_) { const real4 spherical = CoordinateTransformation::cartesian2spherical(var.currentGroundPlane_); params.push_back(spherical.y); params.push_back(spherical.z); params.push_back(var.currentGroundPlane_.w); } if (params.empty()) return Vec(); Vec result = Eigen::Map<Vec>(params.data(), params.size()); return result; }; const static auto packMinMax = [](const InputVariables& var) -> std::pair<Vec, Vec> { std::vector<double> min, max; static const double BIG = 1e10; static const double SMALL = 1e-10; if (var.optimizeGravity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeYoungsModulus_) { 
// solve (cont.), inside packMinMax: per-variable box constraints, pushed in the same
// order as packInputVariables — Young's modulus >= 1, Poisson ratio in [0.01, 0.49],
// mass and damping strictly positive (>= SMALL), everything else effectively unbounded.
min.push_back(1); max.push_back(BIG); } if (var.optimizePoissonRatio_) { min.push_back(0.01); max.push_back(0.49); } if (var.optimizeMass_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeMassDamping_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeStiffnessDamping_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeInitialLinearVelocity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeInitialAngularVelocity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeGroundPlane_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } Vec minVec = Eigen::Map<Vec>(min.data(), min.size()); Vec maxVec = Eigen::Map<Vec>(max.data(), max.size()); return std::make_pair(minVec, maxVec); }; const static auto unpackInputVariables = [](const InputVariables& ref, const Vec& params) -> InputVariables { InputVariables var; int i = 0; if (ref.optimizeGravity_) { var.optimizeGravity_ = true; var.currentGravity_.x = static_cast<real>(params[i++]); var.currentGravity_.y = static_cast<real>(params[i++]); var.currentGravity_.z = static_cast<real>(params[i++]); } if (ref.optimizeYoungsModulus_) { var.optimizeYoungsModulus_ = true; var.currentYoungsModulus_ = static_cast<real>(params[i++]); } if (ref.optimizePoissonRatio_) { var.optimizePoissonRatio_ = true; var.currentPoissonRatio_ = static_cast<real>(params[i++]); } if (ref.optimizeMass_) { var.optimizeMass_ = true; var.currentMass_ = static_cast<real>(params[i++]); } if (ref.optimizeMassDamping_) { var.optimizeMassDamping_ = true; var.currentMassDamping_ = static_cast<real>(params[i++]); } if (ref.optimizeStiffnessDamping_) { var.optimizeStiffnessDamping_ = true; var.currentStiffnessDamping_ = static_cast<real>(params[i++]); } if
// unpackInputVariables (cont.): the ground-plane normal is optimized as spherical
// angles (theta, phi) on the unit sphere and converted back to cartesian; the fourth
// packed parameter is the plane height (.w).
(ref.optimizeInitialLinearVelocity_) { var.optimizeInitialLinearVelocity_ = true; var.currentInitialLinearVelocity_.x = static_cast<real>(params[i++]); var.currentInitialLinearVelocity_.y = static_cast<real>(params[i++]); var.currentInitialLinearVelocity_.z = static_cast<real>(params[i++]); } if (ref.optimizeInitialAngularVelocity_) { var.optimizeInitialAngularVelocity_ = true; var.currentInitialAngularVelocity_.x = static_cast<real>(params[i++]); var.currentInitialAngularVelocity_.y = static_cast<real>(params[i++]); var.currentInitialAngularVelocity_.z = static_cast<real>(params[i++]); } if (ref.optimizeGroundPlane_) { double theta = params[i++]; double phi = params[i++]; const double3 spherical = make_double3(1, theta, phi); const double3 cartesian = CoordinateTransformation::spherical2cartesian(spherical); var.optimizeGroundPlane_ = true; var.currentGroundPlane_.x = static_cast<real>(cartesian.x); var.currentGroundPlane_.y = static_cast<real>(cartesian.y); var.currentGroundPlane_.z = static_cast<real>(cartesian.z); var.currentGroundPlane_.w = static_cast<real>(params[i++]); } return var; }; static const auto packGradient = [](const InputVariables& ref, const InputVariables& in, const AdjointVariables& adj) -> Vec { std::vector<double> params; if (ref.optimizeGravity_) { params.push_back(adj.adjGravity_.x); params.push_back(adj.adjGravity_.y); params.push_back(adj.adjGravity_.z); } if (ref.optimizeYoungsModulus_) params.push_back(adj.adjYoungsModulus_); if (ref.optimizePoissonRatio_) params.push_back(adj.adjPoissonRatio_); if (ref.optimizeMass_) params.push_back(adj.adjMass_); if (ref.optimizeMassDamping_) params.push_back(adj.adjMassDamping_); if (ref.optimizeStiffnessDamping_) params.push_back(adj.adjStiffnessDamping_); if (ref.optimizeInitialLinearVelocity_) { params.push_back(adj.adjInitialLinearVelocity.x); params.push_back(adj.adjInitialLinearVelocity.y); params.push_back(adj.adjInitialLinearVelocity.z); } if (ref.optimizeInitialAngularVelocity_) {
// packGradient (cont.): the ground-plane gradient is pushed through the adjoint of the
// spherical->cartesian transform so it matches the (theta, phi, height) parameterization.
// varToSettings copies the optimization variables into simulation settings; toDouble
// parses string-valued settings with a caller-supplied default for empty/invalid text.
params.push_back(adj.adjInitialAngularVelocity.x); params.push_back(adj.adjInitialAngularVelocity.y); params.push_back(adj.adjInitialAngularVelocity.z); } if (ref.optimizeGroundPlane_) { const double4 spherical = CoordinateTransformation::cartesian2spherical(make_double4(in.currentGroundPlane_.x, in.currentGroundPlane_.y, in.currentGroundPlane_.z, 0)); const double4 adjSpherical = CoordinateTransformation::spherical2cartesianAdjoint(spherical, adj.adjGroundPlane_); params.push_back(adjSpherical.y); params.push_back(adjSpherical.z); params.push_back(adj.adjGroundPlane_.w); } Vec result = Eigen::Map<Vec>(params.data(), params.size()); return result; }; static const auto varToSettings = [](const InputVariables& var) -> SoftBodySimulation3D::Settings { SoftBodySimulation3D::Settings settings; settings.gravity_ = var.currentGravity_; settings.youngsModulus_ = var.currentYoungsModulus_; settings.poissonsRatio_ = var.currentPoissonRatio_; settings.mass_ = var.currentMass_; settings.dampingAlpha_ = var.currentMassDamping_; settings.dampingBeta_ = var.currentStiffnessDamping_; settings.initialLinearVelocity_ = var.currentInitialLinearVelocity_; settings.initialAngularVelocity_ = var.currentInitialAngularVelocity_; settings.groundPlane_ = var.currentGroundPlane_; return settings; }; static const auto toDouble = [](const std::string& str, double def) -> double { try { double val = std::stod(str); return val; } catch (const std::invalid_argument& ex) { return def; } }; //initialize statistics Statistics statistics; //Prepare initial values Vec initial = packInputVariables(settings_.variables_); Vec min, max; std::tie(min, max) = packMinMax(settings_.variables_); //callback(varToSettings(settings_.variables_), 0); //Initialize scaling Vec paramScaling = Vec::Ones(initial.size()); Vec paramScalingInv = Vec::Ones(initial.size()); if (settings_.normalizeUnits_) { int i = 0; if (settings_.variables_.optimizeGravity_) { real scale = ::max({ real(1e-8),
// Unit normalization (cont.): paramScalingInv holds each parameter's magnitude (clamped
// away from zero with 1e-8/1e-5/1 floors so the inverse stays finite); paramScaling is
// its element-wise inverse, so the optimizer always works on O(1) variables.
abs(settings_.variables_.currentGravity_.x), abs(settings_.variables_.currentGravity_.y), abs(settings_.variables_.currentGravity_.z) }); paramScalingInv[i] = paramScalingInv[i + 1] = paramScalingInv[i + 2] = scale; i += 3; } if (settings_.variables_.optimizeYoungsModulus_) paramScalingInv[i++] = settings_.variables_.currentYoungsModulus_; if (settings_.variables_.optimizePoissonRatio_) paramScalingInv[i++] = settings_.variables_.currentPoissonRatio_; if (settings_.variables_.optimizeMass_) paramScalingInv[i++] = settings_.variables_.currentMass_; if (settings_.variables_.optimizeMassDamping_) paramScalingInv[i++] = ::max(real(1e-5), settings_.variables_.currentMassDamping_); if (settings_.variables_.optimizeStiffnessDamping_) paramScalingInv[i++] = ::max(real(1e-5), settings_.variables_.currentStiffnessDamping_); if (settings_.variables_.optimizeInitialLinearVelocity_) { paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.x)); paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.y)); paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.z)); } if (settings_.variables_.optimizeInitialAngularVelocity_) { paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.x)); paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.y)); paramScalingInv[i++] = ::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.z)); } if (settings_.variables_.optimizeGroundPlane_) { const real4 spherical = CoordinateTransformation::cartesian2spherical(settings_.variables_.currentGroundPlane_); real scale = ::max({ real(1e-8), spherical.y, spherical.z }); paramScalingInv[i] = paramScalingInv[i + 1] = scale; paramScalingInv[i + 2] = ::max(real(1), settings_.variables_.currentGroundPlane_.w); i += 3; } paramScaling = paramScalingInv.cwiseInverse(); CI_LOG_I("parameter scaling: " <<
// Gradient-descent branch: the objective unscales xs (x = xs .* paramScalingInv),
// evaluates the adjoint (or finite-difference) gradient, and rescales it for the scaled
// space (dC/dxs = dC/dx .* paramScalingInv, by the chain rule).
// NOTE(review): setMinStepsize previously read gradientDescentSettings_.epsilon_ —
// fixed to minStepsize_ so the GUI/JSON "MinStepsize" setting actually takes effect.
paramScalingInv.transpose()); } if (settings_.optimizer_ == Settings::GRADIENT_DESCENT) { //Gradient descent real finalCost = 0; const auto fun = [this, &finalCost, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs) -> Vec { const Vec x = xs.cwiseProduct(paramScalingInv); CI_LOG_I("X: " << x.transpose()); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); Vec gradient = packGradient(this->settings_.variables_, var, adj); CI_LOG_I("GradientDescent-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj << " (" << gradient.transpose() << ")"); return gradient.cwiseProduct(paramScalingInv); }; ar::GradientDescent<Vec> gd(initial.cwiseProduct(paramScaling).eval(), fun); gd.setEpsilon(toDouble(settings_.gradientDescentSettings_.epsilon_, 1e-15)); gd.setLinearStepsize(toDouble(settings_.gradientDescentSettings_.linearStepsize_, 0.001)); gd.setMaxStepsize(toDouble(settings_.gradientDescentSettings_.maxStepsize_, 1e20)); gd.setMinStepsize(toDouble(settings_.gradientDescentSettings_.minStepsize_, 0)); gd.setMinValues(min.cwiseProduct(paramScaling)); gd.setMaxValues(max.cwiseProduct(paramScaling)); for (int oi = 0; oi < settings_.numIterations_; ++oi) { worker->setStatus(tfm::format("Adjoint: optimization %d/%d", (oi + 1), settings_.numIterations_)); if (gd.step()) break; if (worker->isInterrupted()) break; //fetch intermediate this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, gd.getCurrentSolution().cwiseProduct(paramScalingInv))); auto gradient = varToSettings(unpackInputVariables(settings_.variables_,
// RPROP branch: same objective as GD, but per-component adaptive step sizes; note the
// callback reports the variables captured *before* rprop.step() (currentVariables).
gd.getCurrentGradient().cwiseProduct(paramScaling))); callback(this->finalVariables_, gradient, this->finalCost_); } //fetch final output this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, gd.getCurrentSolution().cwiseProduct(paramScalingInv))); } else if (settings_.optimizer_ == Settings::RPROP) { //RProp (Resilient Back Propagation) Gradient descent real finalCost = 0; const auto fun = [this, &finalCost, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs) -> Vec { const Vec x = xs.cwiseProduct(paramScalingInv); CI_LOG_I("X: " << x.transpose()); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); Vec gradient = packGradient(this->settings_.variables_, var, adj); CI_LOG_I("Rprop-GradientDescent-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj << " (" << gradient.transpose() << ")"); return gradient.cwiseProduct(paramScalingInv); }; ar::RpropGradientDescent<Vec> rprop(initial.cwiseProduct(paramScaling).eval(), fun); rprop.setEpsilon(toDouble(settings_.rpropSettings_.epsilon_, 1e-15)); rprop.setInitialStepsize(toDouble(settings_.rpropSettings_.initialStepsize_, 0.001)); rprop.setMinValues(min.cwiseProduct(paramScaling)); rprop.setMaxValues(max.cwiseProduct(paramScaling)); for (int oi = 0; oi < settings_.numIterations_; ++oi) { worker->setStatus(tfm::format("Adjoint: optimization %d/%d", (oi + 1), settings_.numIterations_)); auto currentVariables = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); if (rprop.step()) break; if
// LBFGS branch setup: string settings are mapped onto LBFGSpp::LBFGSParam (library
// defaults kept when parsing fails) and the line-search algorithm is chosen from the
// Armijo / Wolfe / StrongWolfe enum.
(worker->isInterrupted()) break; //fetch intermediate this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); auto gradient = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentGradient().cwiseProduct(paramScaling))); callback(currentVariables, gradient, this->finalCost_); } //fetch final output this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); } else if (settings_.optimizer_ == Settings::LBFGS) { //LBFGS LBFGSpp::LBFGSParam<double> params; params.epsilon = toDouble(settings_.lbfgsSettings_.epsilon_, params.epsilon); params.past = settings_.lbfgsSettings_.past_; params.delta = toDouble(settings_.lbfgsSettings_.delta_, params.delta); params.linesearch = settings_.lbfgsSettings_.lineSearchAlg_ == Settings::LbfgsSettings::Armijo ? LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_ARMIJO : settings_.lbfgsSettings_.lineSearchAlg_ == Settings::LbfgsSettings::Wolfe ?
// LBFGS objective writes the (scaled) gradient through the out-parameter and returns
// the cost; the per-iteration callback reports progress and returns false to abort
// when the background worker is interrupted.
LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_WOLFE : LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE; params.max_linesearch = settings_.lbfgsSettings_.linesearchMaxTrials_; params.min_step = toDouble(settings_.lbfgsSettings_.linesearchMinStep_, params.min_step); params.max_step = toDouble(settings_.lbfgsSettings_.linesearchMaxStep_, params.max_step); params.ftol = toDouble(settings_.lbfgsSettings_.linesearchTol_, params.ftol); params.max_iterations = settings_.numIterations_; LBFGSpp::LBFGSSolver<double> lbfgs(params); LBFGSpp::LBFGSSolver<double>::ObjectiveFunction_t fun = [this, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs, Vec& gradient) -> double { const Vec x = xs.cwiseProduct(paramScalingInv); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; double finalCost; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); gradient = packGradient(this->settings_.variables_, var, adj).cwiseProduct(paramScalingInv); CI_LOG_I("LBFGS-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj); return finalCost; }; LBFGSpp::LBFGSSolver<double>::CallbackFunction_t lbfgsCallback = [this, worker, callback, &paramScaling, &paramScalingInv](const Vec& x, const Vec& g, const double& v, int k) -> bool { worker->setStatus(tfm::format("Adjoint: optimization %d/%d, cost %f", k + 1, settings_.numIterations_, v)); InputVariables var = unpackInputVariables(this->settings_.variables_, x.cwiseProduct(paramScalingInv)); auto gradient = varToSettings(unpackInputVariables(this->settings_.variables_, g.cwiseProduct(paramScaling))); callback(varToSettings(var), gradient,
static_cast<real>(v)); return !worker->isInterrupted(); }; LBFGSpp::LBFGSSolver<double>::ValidationFunction_t validation = [&min, &max, &paramScaling](const Vec& x) -> bool { return (x.array() >= min.cwiseProduct(paramScaling).array()).all() && (x.array() <= max.cwiseProduct(paramScaling).array()).all(); }; double finalCost = 0; Vec value = initial.cwiseProduct(paramScaling); try { int oi = lbfgs.minimize(fun, value, finalCost, lbfgsCallback, validation); CI_LOG_I("Optimized for " << oi << " iterations, final cost " << finalCost); } catch (const std::runtime_error& error) { CI_LOG_EXCEPTION("LBFGS failed", error); return false; } //fetch final output this->finalCost_ = static_cast<real>(finalCost); this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, value.cwiseProduct(paramScalingInv))); } SoftBodySimulation3D::Settings finalGradient; memset(&finalGradient, 0, sizeof(SoftBodySimulation3D::Settings)); callback(this->finalVariables_, finalGradient, this->finalCost_); CI_LOG_I("Result: cost = " << finalCost_ << ", values:" << this->finalVariables_); CI_LOG_I(statistics); return true; } void AdjointSolver::testGradient(BackgroundWorker2* worker) { static const int numSteps = 15; static const int halfNumSteps = numSteps / 2; static const real frac = 0.5; CI_LOG_I("TEST GRADIENT"); #define GRADIENT(OPT, VAR1, VAR2, VARADJ, NAME) \ if (settings_.variables_.OPT && !worker->isInterrupted()) \ { \ InputVariables variables; \ variables.OPT = true; \ const real minValue = reference_->settings_.VAR1==0 ? -halfNumSteps : reference_->settings_.VAR1 * frac; \ const real maxValue = reference_->settings_.VAR1==0 ? 
+halfNumSteps : 2*reference_->settings_.VAR1 - minValue; \ typedef std::array<double, 3> entry; \ std::vector<entry> entries; \ for (int i=0; i<numSteps && !worker->isInterrupted(); ++i) \ { \ worker->setStatus(tinyformat::format("%s gradient %d/%d", NAME, i+1, numSteps)); \ const real value = minValue + (maxValue - minValue) * i / (numSteps - 1); \ variables.VAR2 = value; \ AdjointVariables adj = { 0 }; \ const real cost = computeGradient( \ reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); \ entries.push_back(entry{ value, cost, adj.VARADJ }); \ } \ std::stringstream ss; \ ss << NAME << ":" << std::endl; \ ss << " Value Cost Gradient" << std::endl; \ for (int i=0; i<entries.size(); ++i) \ { \ ss \ << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] \ << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] \ << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] \ << std::endl; \ } \ CI_LOG_I(ss.str()); \ } GRADIENT(optimizeMass_, mass_, currentMass_, adjMass_, "Mass"); //GRADIENT(optimizeGravity_, gravity_.x, currentGravity_.x, adjGravity_.x, "GravityX"); GRADIENT(optimizeGravity_, gravity_.y, currentGravity_.y, adjGravity_.y, "GravityY"); //GRADIENT(optimizeGravity_, gravity_.z, currentGravity_.z, adjGravity_.z, "GravityZ"); GRADIENT(optimizeMassDamping_, dampingAlpha_, currentMassDamping_, adjMassDamping_, "DampingMass"); GRADIENT(optimizeStiffnessDamping_, dampingBeta_, currentStiffnessDamping_, adjStiffnessDamping_, "DampingStiffness"); GRADIENT(optimizeYoungsModulus_, youngsModulus_, currentYoungsModulus_, adjYoungsModulus_, "Young's Modulus"); GRADIENT(optimizePoissonRatio_, poissonsRatio_, currentPoissonRatio_, adjPoissonRatio_, "Poisson Ratio"); GRADIENT(optimizeInitialLinearVelocity_, initialLinearVelocity_.x, currentInitialLinearVelocity_.x, adjInitialLinearVelocity.x, "LinearVelocityX"); GRADIENT(optimizeInitialLinearVelocity_, 
initialLinearVelocity_.y, currentInitialLinearVelocity_.y, adjInitialLinearVelocity.y, "LinearVelocityY"); GRADIENT(optimizeInitialLinearVelocity_, initialLinearVelocity_.z, currentInitialLinearVelocity_.z, adjInitialLinearVelocity.z, "LinearVelocityZ"); GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.x, currentInitialAngularVelocity_.x, adjInitialAngularVelocity.x, "AngularVelocityX"); GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.y, currentInitialAngularVelocity_.y, adjInitialAngularVelocity.y, "AngularVelocityY"); GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.z, currentInitialAngularVelocity_.z, adjInitialAngularVelocity.z, "AngularVelocityZ"); #undef GRADIENT // young's modulus and poisson's ratio in uniform if (settings_.variables_.optimizeYoungsModulus_ && settings_.variables_.optimizePoissonRatio_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizePoissonRatio_ = true; variables.optimizeYoungsModulus_ = true; const real minYoungValue = reference_->settings_.youngsModulus_ * frac; const real maxYoungValue = 2 * reference_->settings_.youngsModulus_ - minYoungValue; const real minPoissonValue = reference_->settings_.poissonsRatio_ * frac; const real maxPoissonValue = 2 * reference_->settings_.poissonsRatio_ - minPoissonValue; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "Young+Poisson", i + 1, numSteps)); const real young = minYoungValue + (maxYoungValue - minYoungValue) * i / (numSteps - 1); const real poisson = minPoissonValue + (maxPoissonValue - minPoissonValue) * j / (numSteps - 1); variables.currentYoungsModulus_ = young; variables.currentPoissonRatio_ = poisson; AdjointVariables adj = {0}; const real cost = computeGradient( reference_->input_, reference_->settings_, 
variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{young, poisson, cost, adj.adjYoungsModulus_, adj.adjPoissonRatio_}); } std::stringstream ss; ss << "Young's Modulus and Poisson's Ratio:" << std::endl; ss << " YoungModulus PoissonRatio Cost GradientYoung GradientPoisson" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); } // stiffness and mass damping in uniform if (settings_.variables_.optimizeMassDamping_ && settings_.variables_.optimizeStiffnessDamping_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeMassDamping_ = true; variables.optimizeStiffnessDamping_ = true; const real minMassValue = reference_->settings_.dampingAlpha_ * frac; const real maxMassValue = 2 * reference_->settings_.dampingAlpha_ - minMassValue; const real minStiffnessValue = reference_->settings_.dampingBeta_ * frac; const real maxStiffnessValue = 2 * reference_->settings_.dampingBeta_ - minStiffnessValue; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "AllDamping", i + 1, numSteps)); const real mass = minMassValue + (maxMassValue - minMassValue) * i / (numSteps - 1); const real stiffness = minStiffnessValue + (maxStiffnessValue - minStiffnessValue) * j / (numSteps - 1); variables.currentMassDamping_ = mass; variables.currentStiffnessDamping_ = stiffness; AdjointVariables adj = { 0 }; const real cost = 
computeGradient( reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{ mass, stiffness, cost, adj.adjMassDamping_, adj.adjStiffnessDamping_ }); } std::stringstream ss; ss << "Mass Damping and Stiffness Damping:" << std::endl; ss << " Mass-Damping Stiffness-Damping Cost Gradient-Mass Gradient-Stiffness" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); } //Ground Plane if (settings_.variables_.optimizeGroundPlane_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeGroundPlane_ = true; const real referenceHeight = reference_->settings_.groundPlane_.w; const real referenceTheta = CoordinateTransformation::cartesian2spherical(reference_->settings_.groundPlane_).y; const real referencePhi = CoordinateTransformation::cartesian2spherical(reference_->settings_.groundPlane_).z; const real minTheta = referenceTheta - frac * M_PI * 0.5; const real maxTheta = referenceTheta + frac * M_PI * 0.5; const real minPhi = referencePhi - frac * M_PI * 0.5; const real maxPhi = referencePhi + frac * M_PI * 0.5; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "GroundPlaneOrientation", i + 1, numSteps)); const real theta = minTheta + (maxTheta - minTheta) * i / (numSteps - 1); const real phi = minPhi + (maxPhi - minPhi) * j / (numSteps - 1); 
variables.currentGroundPlane_ = CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)); variables.currentGroundPlane_.w = referenceHeight; AdjointVariables adj = { 0 }; const real cost = computeGradient( reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); double4 adjSpherical = CoordinateTransformation::spherical2cartesianAdjoint(make_double4(1, theta, phi, 0), adj.adjGroundPlane_); entries.push_back(entry{ theta, phi, cost, adjSpherical.y, adjSpherical.z }); CI_LOG_V( << " " << std::fixed << std::setw(12) << std::setprecision(7) << theta << " " << std::fixed << std::setw(12) << std::setprecision(7) << phi << " " << std::fixed << std::setw(12) << std::setprecision(7) << cost << " " << std::fixed << std::setw(12) << std::setprecision(7) << adjSpherical.y << " " << std::fixed << std::setw(12) << std::setprecision(7) << adjSpherical.z << " (" << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).x << ", " << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).y << ", " << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).z << ")" ); } std::stringstream ss; ss << "Ground Plane Orientation:" << std::endl; ss << " Theta Phi Cost Gradient-Theta Gradient-Phi" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); } if (settings_.variables_.optimizeGroundPlane_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeGroundPlane_ = true; const real minValue = reference_->settings_.groundPlane_.w - 
0.1; const real maxValue = 2 * reference_->settings_.groundPlane_.w + 0.1; typedef std::array<double, 3> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "Ground Height", i + 1, numSteps)); const real value = minValue + (maxValue - minValue) * i / (numSteps - 1); variables.currentGroundPlane_ = reference_->settings_.groundPlane_; variables.currentGroundPlane_.w = value; AdjointVariables adj = { 0 }; const real cost = computeGradient(reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{ value, cost, adj.adjGroundPlane_.w }); } std::stringstream ss; ss << "Ground Height" << ":" << std::endl; ss << " Value Cost Gradient" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std:: fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << std::endl; } CI_LOG_I(ss.str()); } CI_LOG_I("DONE"); } std::ostream& operator<<(std::ostream& os, const AdjointSolver::AdjointVariables& obj) { if (obj.adjGravity_.x != 0 || obj.adjGravity_.y != 0 || obj.adjGravity_.z != 0) os << " Gravity=(" << obj.adjGravity_.x << "," << obj.adjGravity_.y << "," << obj.adjGravity_.z << ")"; if (obj.adjYoungsModulus_ != 0) os << " YoungsModulus=" << obj.adjYoungsModulus_; if (obj.adjPoissonRatio_ != 0) os << " PoissonRatio=" << obj.adjPoissonRatio_; if (obj.adjMass_ != 0) os << " Mass=" << obj.adjMass_; if (obj.adjMassDamping_ != 0) os << " MassDamping=" << obj.adjMassDamping_; if (obj.adjStiffnessDamping_ != 0) os << " StiffnessDamping=" << obj.adjStiffnessDamping_; if (obj.adjInitialLinearVelocity.x != 0 || obj.adjInitialLinearVelocity.y != 0 || obj.adjInitialLinearVelocity.z != 0) os << " InitialLinearVelocity=(" << 
obj.adjInitialLinearVelocity.x << "," << obj.adjInitialLinearVelocity.y << "," << obj.adjInitialLinearVelocity.z << ")"; if (obj.adjInitialAngularVelocity.x != 0 || obj.adjInitialAngularVelocity.y != 0 || obj.adjInitialAngularVelocity.z != 0) os << " InitialAngularVelocity=(" << obj.adjInitialAngularVelocity.x << "," << obj.adjInitialAngularVelocity.y << "," << obj.adjInitialAngularVelocity.z << ")"; if (obj.adjGroundPlane_.x != 0 || obj.adjGroundPlane_.y != 0 || obj.adjGroundPlane_.z != 0 || obj.adjGroundPlane_.w != 0) os << " GroundPlane=(" << obj.adjGroundPlane_.x << "," << obj.adjGroundPlane_.y << "," << obj.adjGroundPlane_.z << "," << obj.adjGroundPlane_.w << ")"; return os; } std::ostream& operator<<(std::ostream& os, const AdjointSolver::InputVariables& obj) { if (obj.optimizeGravity_) os << " Gravity=(" << obj.currentGravity_.x << "," << obj.currentGravity_.y << "," << obj.currentGravity_.z << ")"; if (obj.optimizeYoungsModulus_) os << " YoungsModulus=" << obj.currentYoungsModulus_; if (obj.optimizePoissonRatio_) os << " PoissonRatio=" << obj.currentPoissonRatio_; if (obj.optimizeMass_) os << " Mass=" << obj.currentMass_; if (obj.optimizeMassDamping_) os << " MassDamping=" << obj.currentMassDamping_; if (obj.optimizeStiffnessDamping_) os << " StiffnessDamping=" << obj.currentStiffnessDamping_; if (obj.optimizeInitialLinearVelocity_) os << " InitialLinearVelocity=(" << obj.currentInitialLinearVelocity_.x << "," << obj.currentInitialLinearVelocity_.y << "," << obj.currentInitialLinearVelocity_.z << ")"; if (obj.optimizeInitialAngularVelocity_) os << " InitialAngularVelocity=(" << obj.currentInitialAngularVelocity_.x << "," << obj.currentInitialAngularVelocity_.y << "," << obj.currentInitialAngularVelocity_.z << ")"; if (obj.optimizeGroundPlane_) os << " GroundPlane=(" << obj.currentGroundPlane_.x << "," << obj.currentGroundPlane_.y << "," << obj.currentGroundPlane_.z << "," << obj.currentGroundPlane_.w << ")"; return os; } }
28e08a82eb0a69434cec53e4f9ba1c154c3a3ed3.cu
#include "AdjointSolver.h" #include "CommonKernels.h" #include <cuMat/Core> #include <cinder/app/AppBase.h> #include "tinyformat.h" #include "GradientDescent.h" #include "RpropGradientDescent.h" #include "LBFGS.h" #include "Utils3D.h" #include "CoordinateTransformation.h" #include "CudaTimer.h" #include "DebugUtils.h" //For testing: set to 1 to enforce a symmetric matrix in the CG //If 0, small unsymmetries of a few ulps are in the matrix due to the ordering of the operations //If 1, the upper and lower triangular parts are averaged to create a numerically exact symmetric matrix #define MAKE_NEWMARK_SYMMETRIC 0 //Specifies if the forward iteration shall be stopped if the linear solver did not converge // 1: no convergence // 2: NaN #define FORWARD_BREAK_ON_DIVERGENCE 2 //Specifies if the adjoint step shall ignore the current step (no gradients added) if the linear solver did not converge #define ADJOINT_IGNORE_DIVERGENCE 0 //Some way more verbose logging #define ADJOINT_VERBOSE_LOGGING 0 namespace ar3d { AdjointSolver::PrecomputedValues AdjointSolver::allocatePrecomputedValues(const Input& input) { PrecomputedValues p; p.lumpedMass_ = VectorX(input.numActiveNodes_); p.lumpedMass_.setZero(); p.bodyForces_ = Vector3X(input.numActiveNodes_); p.bodyForces_.setZero(); p.initialVelocity_ = Vector3X(input.numActiveNodes_); p.initialVelocity_.setZero(); return p; } AdjointSolver::ForwardState AdjointSolver::allocateForwardState(const Input& input) { ForwardState s; s.displacements_ = Vector3X(input.numActiveNodes_); s.displacements_.setZero(); s.velocities_ = Vector3X(input.numActiveNodes_); s.velocities_.setZero(); return s; } AdjointSolver::ForwardStorage AdjointSolver::allocateForwardStorage(const Input& input) { ForwardStorage s; s.forces_ = Vector3X(input.numActiveNodes_); s.stiffness_ = SMatrix3x3(input.sparsityPattern_); s.newmarkA_ = SMatrix3x3(input.sparsityPattern_); s.newmarkB_ = Vector3X(input.numActiveNodes_); return s; } void 
AdjointSolver::BackwardState::reset() { adjDisplacements_.setZero(); adjVelocities_.setZero(); if (adjGridDisplacements_.size() > 0) adjGridDisplacements_.setZero(); } AdjointSolver::BackwardState AdjointSolver::allocateBackwardState(const Input& input, int costFunctionInput) { BackwardState s; s.adjDisplacements_ = Vector3X(input.numActiveNodes_); s.adjVelocities_ = Vector3X(input.numActiveNodes_); if (costFunctionInput & int(ICostFunction::RequiredInput::GridDisplacements)) s.adjGridDisplacements_ = WorldGridData<real3>::DeviceArray_t(input.grid_->getSize().x(), input.grid_->getSize().y(), input.grid_->getSize().z()); s.reset(); return s; } AdjointSolver::BackwardStorage AdjointSolver::allocateBackwardStorage(const Input& input) { BackwardStorage s; s.unaryLumpedMass_ = VectorX(input.numActiveNodes_); s.unaryBodyForces_ = Vector3X(input.numActiveNodes_); s.adjNewmarkA_ = SMatrix3x3(input.sparsityPattern_); s.adjNewmarkB_ = Vector3X(input.numActiveNodes_); s.adjStiffness_ = SMatrix3x3(input.sparsityPattern_); s.adjForces_ = Vector3X(input.numActiveNodes_); s.adjMass_ = VectorX(input.numActiveNodes_); s.adjForces_.setZero(); s.adjMass_.setZero(); return s; } AdjointSolver::AdjointVariables& AdjointSolver::AdjointVariables::operator*=(double scaling) { adjGravity_ *= scaling; adjYoungsModulus_ *= scaling; adjPoissonRatio_ *= scaling; adjMass_ *= scaling; adjMassDamping_ *= scaling; adjStiffnessDamping_ *= scaling; adjGroundPlane_ *= scaling; adjInitialAngularVelocity *= scaling; adjInitialLinearVelocity *= scaling; return *this; } AdjointSolver::InputVariables::InputVariables() : optimizeGravity_(false) , currentGravity_(make_real3(0, -10, 0)) , optimizeYoungsModulus_(false) , currentYoungsModulus_(2000) , optimizePoissonRatio_(false) , currentPoissonRatio_(0.45) , optimizeMass_(false) , currentMass_(1) , optimizeMassDamping_(false) , currentMassDamping_(0.1) , optimizeStiffnessDamping_(false) , currentStiffnessDamping_(0.01) , optimizeInitialLinearVelocity_(false) 
, currentInitialLinearVelocity_(make_real3(0,0,0)) , optimizeInitialAngularVelocity_(false) , currentInitialAngularVelocity_(make_real3(0, 0, 0)) , optimizeGroundPlane_(false) , currentGroundPlane_(make_real4(0, 1, 0, 0)) { } AdjointSolver::CostFunctionTmp AdjointSolver::allocateCostFunctionTmp(const Input & input, CostFunctionPtr costFunction) { CostFunctionTmp tmp; if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { tmp.costOutput_.adjDisplacements_ = Vector3X(input.numActiveNodes_); tmp.costOutput_.adjVelocities_ = Vector3X(input.numActiveNodes_); } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { tmp.costOutput_.adjGridDisplacements_ = WorldGridData<real3>::DeviceArray_t(input.grid_->getSize().x(), input.grid_->getSize().y(), input.grid_->getSize().z()); } return tmp; } bool AdjointSolver::performForwardStep( const ForwardState& prevState, ForwardState& nextStateOut, ForwardStorage& nextStorageOut, const Input& input, const PrecomputedValues& precomputed, const SoftBodySimulation3D::Settings& settings, int costFunctionRequiredInput, bool memorySaving) { //reset storage nextStorageOut.forces_.setZero(); nextStorageOut.stiffness_.setZero(); nextStorageOut.newmarkA_.setZero(); nextStorageOut.newmarkB_.setZero(); SoftBodyGrid3D::State s; s.displacements_ = prevState.displacements_; s.velocities_ = prevState.velocities_; //1. collision forces nextStorageOut.forces_.inplace() = precomputed.bodyForces_; if (settings.enableCollision_) { SoftBodyGrid3D::applyCollisionForces(input, settings, s, nextStorageOut.forces_); } //2. stiffness matrix SoftBodyGrid3D::computeStiffnessMatrix(input, s, settings, nextStorageOut.stiffness_, nextStorageOut.forces_); //3. 
Solve CI_LOG_D("Norm of PrevDisplacement: " << static_cast<real>(prevState.displacements_.norm())); CommonKernels::newmarkTimeIntegration( nextStorageOut.stiffness_, nextStorageOut.forces_, precomputed.lumpedMass_, prevState.displacements_, prevState.velocities_, settings.dampingAlpha_, settings.dampingBeta_, settings.timestep_, nextStorageOut.newmarkA_, nextStorageOut.newmarkB_, settings.newmarkTheta_); nextStateOut.displacements_.inplace() = prevState.displacements_ + make_real3(settings.timestep_) * prevState.velocities_; int iterations = settings.solverIterations_; real tolError = settings.solverTolerance_; #if MAKE_NEWMARK_SYMMETRIC == 1 nextStorageOut.newmarkA_ = DebugUtils::makeSymmetric(nextStorageOut.newmarkA_); #endif CommonKernels::solveCG( nextStorageOut.newmarkA_, nextStorageOut.newmarkB_, nextStateOut.displacements_, iterations, tolError); CI_LOG_D("Norm of NextDisplacement: " << static_cast<real>(nextStateOut.displacements_.norm())); CommonKernels::newmarkComputeVelocity( prevState.displacements_, prevState.velocities_, nextStateOut.displacements_, nextStateOut.velocities_, settings.timestep_, settings.newmarkTheta_); #if FORWARD_BREAK_ON_DIVERGENCE==2 bool failedToConverge = std::isnan(tolError); #else bool failedToConverge = iterations == settings.solverIterations_; #endif //4. 
Post-Processing if needed if (costFunctionRequiredInput & int(ICostFunction::RequiredInput::GridDisplacements)) { //diffuse displacements over the whole grid const Eigen::Vector3i& size = input.grid_->getSize(); nextStateOut.gridDisplacements_ = WorldGridData<real3>::DeviceArray_t::Constant(size.x(), size.y(), size.z(), make_real3(0)); SoftBodyGrid3D::State diffusionState; diffusionState.displacements_ = nextStateOut.displacements_; SoftBodyGrid3D::DiffusionRhs diffusionTmp1 = SoftBodyGrid3D::DiffusionRhs(input.numDiffusedNodes_, 1, 3); SoftBodyGrid3D::DiffusionRhs diffusionTmp2 = SoftBodyGrid3D::DiffusionRhs(input.numDiffusedNodes_, 1, 3); SoftBodyGrid3D::diffuseDisplacements( input, diffusionState, nextStateOut.gridDisplacements_, diffusionTmp1, diffusionTmp2); } return !failedToConverge; } real AdjointSolver::evaluateCostFunction( CostFunctionPtr costFunction, int timestep, CostFunctionTmp& tmp, const ForwardState& forwardState, BackwardState& backwardStateOut, const Input& input) { if (!costFunction->hasTimestep(timestep)) return 0; tmp.costOutput_.cost_ = 0; //prepare input and output if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { tmp.costInput_.displacements_ = forwardState.displacements_; tmp.costInput_.velocities_ = forwardState.velocities_; tmp.costOutput_.adjDisplacements_.setZero(); tmp.costOutput_.adjVelocities_.setZero(); } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { tmp.costInput_.gridDisplacements_ = forwardState.gridDisplacements_; tmp.costInput_.referenceSDF_ = input.referenceSdf_; tmp.costOutput_.adjGridDisplacements_.setZero(); } //evaluate cost function costFunction->evaluate(timestep, tmp.costInput_, tmp.costOutput_); //apply output if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::ActiveDisplacements)) { backwardStateOut.adjDisplacements_ += tmp.costOutput_.adjDisplacements_; backwardStateOut.adjVelocities_ += 
tmp.costOutput_.adjVelocities_; } if (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements)) { backwardStateOut.adjGridDisplacements_ += tmp.costOutput_.adjGridDisplacements_; } return tmp.costOutput_.cost_; } void AdjointSolver::performBackwardStep( const ForwardState& prevState, const ForwardState& nextState, const BackwardState& adjNextState, BackwardState& adjPrevStateOut, AdjointVariables& adjVariablesOut, const Input& input, const PrecomputedValues& precomputed, ForwardStorage& nextStorage, BackwardStorage& adjStorage, const SoftBodySimulation3D::Settings& settings, int costFunctionRequiredInput, bool memorySaving) { if (memorySaving) { //in memory saving mode, the intermediate variables (in nextStorage) are not saved //and thus we recompute them here. //This is a copy of performForwardStep without the linear solvers nextStorage.forces_.setZero(); nextStorage.stiffness_.setZero(); nextStorage.newmarkA_.setZero(); nextStorage.newmarkB_.setZero(); SoftBodyGrid3D::State s; s.displacements_ = prevState.displacements_; s.velocities_ = prevState.velocities_; //1. collision forces nextStorage.forces_.inplace() = precomputed.bodyForces_; if (settings.enableCollision_) { SoftBodyGrid3D::applyCollisionForces(input, settings, s, nextStorage.forces_); } //2. stiffness matrix SoftBodyGrid3D::computeStiffnessMatrix(input, s, settings, nextStorage.stiffness_, nextStorage.forces_); //3. Newmark time integration / Solve is ommitted CommonKernels::newmarkTimeIntegration( nextStorage.stiffness_, nextStorage.forces_, precomputed.lumpedMass_, prevState.displacements_, prevState.velocities_, settings.dampingAlpha_, settings.dampingBeta_, settings.timestep_, nextStorage.newmarkA_, nextStorage.newmarkB_, settings.newmarkTheta_); #if MAKE_NEWMARK_SYMMETRIC==1 nextStorage.newmarkA_ = DebugUtils::makeSymmetric(nextStorage.newmarkA_); #endif } Vector3X adjNextDisplacement = adjNextState.adjDisplacements_.deepClone(); //adj4. 
Postprocessing if (costFunctionRequiredInput & int(ICostFunction::RequiredInput::GridDisplacements)) { //adjoint of displacement diffusion over the whole grid adjointDiffuseDisplacements(input, adjNextState.adjGridDisplacements_, adjNextDisplacement); } #if ADJOINT_VERBOSE_LOGGING==1 std::vector<real3> adjNextDisplacementHost(adjNextDisplacement.size()); adjNextDisplacement.copyToHost(&adjNextDisplacementHost[0]); cinder::app::console() << "adjNextDisplacement:\n"; for (int i = 0; i < adjNextDisplacementHost.size(); ++i) { real3 v = adjNextDisplacementHost[i]; tinyformat::format(cinder::app::console(), " [%3d](%7.5f, %7.5f, %7.5f)\n", i, v.x, v.y, v.z); } #endif //adj3. Solve CI_LOG_D("Norm of adjNextDisplacement: " << static_cast<real>(adjNextDisplacement.norm())); CommonKernels::adjointNewmarkComputeVelocity( adjNextState.adjVelocities_, adjNextDisplacement, adjPrevStateOut.adjVelocities_, adjPrevStateOut.adjDisplacements_, settings.timestep_, settings.newmarkTheta_); adjStorage.adjNewmarkA_.setZero(); adjStorage.adjNewmarkB_.setZero(); CI_LOG_D("Norm of adjNextDisplacement: " << static_cast<real>(adjNextDisplacement.norm())); bool converged = CommonKernels::adjointSolveCG( nextStorage.newmarkA_, nextStorage.newmarkB_, nextState.displacements_, adjNextDisplacement, adjStorage.adjNewmarkA_, adjStorage.adjNewmarkB_, settings.solverIterations_*2, settings.solverTolerance_); //adjoint solve needs longer (no good initial guess) CI_LOG_D("Norm of adjNewmarkA: " << static_cast<real>(adjStorage.adjNewmarkA_.norm())); CI_LOG_D("Norm of adjNewmarkB: " << static_cast<real>(adjStorage.adjNewmarkB_.norm())); #if ADJOINT_IGNORE_DIVERGENCE==1 if (!converged) { CI_LOG_E("adjoint CG not converged, force gradients to zero"); //This may be a bit too harsh, since all gradients are lost, // but I don't have a better idea adjStorage.adjNewmarkA_.setZero(); adjStorage.adjNewmarkB_.setZero(); } #endif adjStorage.adjStiffness_.setZero(); adjStorage.adjMass_.setZero(); 
adjStorage.adjForces_.setZero(); DeviceScalar adjMassDamping = DeviceScalar::Zero(); DeviceScalar adjStiffnessDamping = DeviceScalar::Zero(); CommonKernels::adjointNewmarkTimeIntegration( nextStorage.stiffness_, nextStorage.forces_, precomputed.lumpedMass_, prevState.displacements_, prevState.velocities_, settings.dampingAlpha_, settings.dampingBeta_, adjStorage.adjNewmarkA_, adjStorage.adjNewmarkB_, adjStorage.adjStiffness_, adjStorage.adjForces_, adjStorage.adjMass_, adjPrevStateOut.adjDisplacements_, adjPrevStateOut.adjVelocities_, adjMassDamping, adjStiffnessDamping, settings.timestep_, settings.newmarkTheta_); adjVariablesOut.adjMassDamping_ += static_cast<real>(adjMassDamping); adjVariablesOut.adjStiffnessDamping_ += static_cast<real>(adjStiffnessDamping); CI_LOG_D("Norm of adjPrevDisplacement: " << static_cast<real>(adjPrevStateOut.adjDisplacements_.norm())); CI_LOG_D("Norm of adjPrevVelocities: " << static_cast<real>(adjPrevStateOut.adjVelocities_.norm())); //adj2. Stiffness matrix adjStorage.adjLambda_.setZero(); adjStorage.adjMu_.setZero(); adjointComputeStiffnessMatrix( input, prevState.displacements_, settings, adjStorage.adjStiffness_, adjStorage.adjForces_, adjPrevStateOut.adjDisplacements_, adjStorage.adjLambda_, adjStorage.adjMu_); real adjLambda = static_cast<real>(adjStorage.adjLambda_); real adjMu = static_cast<real>(adjStorage.adjMu_); adjointComputeMaterialParameters( settings.youngsModulus_, settings.poissonsRatio_, adjMu, adjLambda, adjVariablesOut.adjYoungsModulus_, adjVariablesOut.adjPoissonRatio_); //adj1. 
Collision Forces if (settings.enableCollision_) { adjointApplyCollisionForces(input, settings, prevState.displacements_, prevState.velocities_, adjStorage.adjForces_, adjPrevStateOut.adjDisplacements_, adjPrevStateOut.adjVelocities_, adjVariablesOut.adjGroundPlane_); } //Adjoint of body forces and mass adjVariablesOut.adjMass_ += static_cast<real>( precomputed.lumpedMass_.dot(adjStorage.adjMass_)); real3 adjGravityTmp = static_cast<real3>( precomputed.bodyForces_.cwiseMul(adjStorage.adjForces_) .reduction<cuMat::functor::Sum<real3>, cuMat::Axis::All>(cuMat::functor::Sum<real3>(), make_real3(0, 0, 0))); adjVariablesOut.adjGravity_ += make_double3(-adjGravityTmp.x, -adjGravityTmp.y, -adjGravityTmp.z); //HACK: the gradient points into the wrong direction. Hence I added a minus here #if 0 //Scale gradient //IMPORTANT: With the fix to adjointNewmarkComputeVelocity, I don't need that anymore! real dispNorm = static_cast<real>(adjPrevStateOut.adjDisplacements_.norm()); double scale = dispNorm < 1e-10 ? 
1 : 1.0 / dispNorm;
    adjPrevStateOut.scale_ = scale * adjNextState.scale_;
    CI_LOG_I("Current scale: " << scale << ", total scale: " << adjPrevStateOut.scale_);
    adjPrevStateOut.adjDisplacements_ *= make_real3(static_cast<real>(scale));
    adjPrevStateOut.adjVelocities_ *= make_real3(static_cast<real>(scale));
    adjVariablesOut *= scale;
#endif
}

// Runs the full forward simulation over all timesteps, evaluates the cost
// function, and then propagates adjoints backwards through time to accumulate
// the gradient of the cost with respect to the optimized parameters in
// adjointVariablesOut.
// Returns the accumulated cost; returns 0 early if the worker is interrupted
// or if the estimated GPU memory requirement exceeds the free device memory.
// memorySaving: if true, only two backward states and one forward storage are
// kept alive and intermediate data is recomputed (slower, less memory).
real AdjointSolver::computeGradient(
    const Input& input, const SoftBodySimulation3D::Settings& settings_,
    const InputVariables& variables, CostFunctionPtr costFunction,
    AdjointVariables& adjointVariablesOut, bool memorySaving,
    BackgroundWorker2* worker, Statistics* statistics)
{
    adjointVariablesOut = { 0 };

    //update settings with the current state
    SoftBodySimulation3D::Settings settings = settings_;
    if (variables.optimizeGravity_) settings.gravity_ = variables.currentGravity_;
    if (variables.optimizeYoungsModulus_) settings.youngsModulus_ = variables.currentYoungsModulus_;
    if (variables.optimizePoissonRatio_) settings.poissonsRatio_ = variables.currentPoissonRatio_;
    if (variables.optimizeMass_) settings.mass_ = variables.currentMass_;
    if (variables.optimizeMassDamping_) settings.dampingAlpha_ = variables.currentMassDamping_;
    if (variables.optimizeStiffnessDamping_) settings.dampingBeta_ = variables.currentStiffnessDamping_;
    if (variables.optimizeInitialLinearVelocity_) settings.initialLinearVelocity_ = variables.currentInitialLinearVelocity_;
    if (variables.optimizeInitialAngularVelocity_) settings.initialAngularVelocity_ = variables.currentInitialAngularVelocity_;
    if (variables.optimizeGroundPlane_) settings.groundPlane_ = variables.currentGroundPlane_;
    settings.validate();

    if (statistics) {
        statistics->numActiveNodes = input.numActiveNodes_;
        statistics->numEmptyNodes = input.grid_->getSize().prod() - input.numActiveNodes_;
        statistics->numActiveElements = input.numActiveCells_;
    }
    CudaTimer timer;

    //query number of timesteps
    int numSteps = costFunction->getNumSteps();
    CI_LOG_D(numSteps << " timesteps have to be computed");

    //first check if enough memory is available
    size_t freeMemory = cuMat::Context::getFreeDeviceMemory();
    size_t totalMemory = cuMat::Context::getTotalDeviceMemory();
    // Estimated allocation size of the states/storages allocated below; the
    // memory-saving layout only keeps per-step displacements+velocities.
    size_t requiredMemory = memorySaving
        ? (numSteps + 1) * (input.numActiveNodes_ * 2 * sizeof(real3))
            + (input.numActiveNodes_ * 6 * sizeof(real3))
            + 2 * input.sparsityPattern_.nnz * sizeof(real3x3)
            + (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements) ? 2 * input.grid_->getSize().prod() * sizeof(real3) : 0)
        : (numSteps + 1) * (input.numActiveNodes_ * 6 * sizeof(real3)
            + 2 * input.sparsityPattern_.nnz * sizeof(real3x3)
            + (costFunction->getRequiredInput() & int(ICostFunction::RequiredInput::GridDisplacements) ? 2 * input.grid_->getSize().prod() * sizeof(real3) : 0));
    if (requiredMemory > freeMemory) {
        CI_LOG_E("Not enough memory! Free memory: " << (freeMemory >> 20) << "MB, required memory: " << (requiredMemory >> 20) << "MB (total: " << (totalMemory >> 20) << "MB)");
        if (!memorySaving) CI_LOG_E("consider enabling memory saving mode");
        return 0;
    }
    CI_LOG_I("Enough memory available. Free memory: " << (freeMemory >> 20) << "MB, required memory: " << (requiredMemory >> 20) << "MB (total: " << (totalMemory >> 20) << "MB)");

    //allocations
    std::vector<ForwardState> forwardStates(numSteps + 1);
    std::vector<ForwardStorage> forwardStorages(memorySaving ? 1 : numSteps + 1);
    std::vector<BackwardState> backwardStates(memorySaving ? 2 : numSteps + 1);
    if (memorySaving) {
        for (int t = 0; t <= numSteps; ++t)
            forwardStates[t] = allocateForwardState(input);
        forwardStorages[0] = allocateForwardStorage(input);
        backwardStates[0] = allocateBackwardState(input, costFunction->getRequiredInput());
        backwardStates[1] = allocateBackwardState(input, costFunction->getRequiredInput());
    } else {
        for (int t = 0; t <= numSteps; ++t) {
            forwardStates[t] = allocateForwardState(input);
            if (t > 0) forwardStorages[t] = allocateForwardStorage(input);
            backwardStates[t] = allocateBackwardState(input, costFunction->getRequiredInput());
        }
    }
    BackwardStorage adjStorage = allocateBackwardStorage(input);

    //precomputations
    CI_LOG_D("precompute values");
    PrecomputedValues precomputed = allocatePrecomputedValues(input);
    {
        // Compute mass matrix and body forces for unit mass / unit gravity;
        // the actual values are obtained by scaling below, which keeps the
        // unary versions available for the adjoint of mass and gravity.
        SoftBodySimulation3D::Settings settings2 = settings;
        settings2.gravity_ = make_real3(1, 1, 1);
        settings2.mass_ = 1;
        adjStorage.unaryLumpedMass_.setZero();
        adjStorage.unaryBodyForces_.setZero();
        SoftBodyGrid3D::computeMassMatrix(input, settings2, adjStorage.unaryLumpedMass_);
        SoftBodyGrid3D::computeBodyForces(input, settings2, adjStorage.unaryBodyForces_);
    }
    precomputed.lumpedMass_ = settings.mass_ * adjStorage.unaryLumpedMass_;
    precomputed.bodyForces_ = settings.gravity_ * adjStorage.unaryBodyForces_;
    SoftBodyGrid3D::computeInitialVelocity(input, settings, precomputed.initialVelocity_);
    //apply initial velocity
    forwardStates[0].velocities_.inplace() = precomputed.initialVelocity_;

    //forward
    CI_LOG_D("forward steps");
    for (int t=1; t<=numSteps; ++t) {
        if (worker && worker->isInterrupted()) return 0;
        CI_LOG_D("Timestep " << t);
        timer.start();
        bool converged = performForwardStep(
            forwardStates[t - 1], forwardStates[t],
            forwardStorages[memorySaving ? 0 : t],
            input, precomputed, settings, costFunction->getRequiredInput(), memorySaving);
        timer.stop();
        if (statistics) statistics->forwardTime.push_back(timer.duration());
#if FORWARD_BREAK_ON_DIVERGENCE>0
        // On divergence, truncate the trajectory and only differentiate the
        // converged prefix of timesteps.
        if (!converged) {
            CI_LOG_E("Linear solver in the forward step did not converge, stop iteration and evaluate gradient only until timestep " << (t - 1));
            std::cout << "Linear solver in the forward step did not converge, stop iteration and evaluate gradient only until timestep " << (t - 1) << std::endl;
            numSteps = t - 1;
            break;
        }
#endif
    }

    //cost function
    real finalCost = 0;
    CostFunctionTmp costFunctionTmp = allocateCostFunctionTmp(input, costFunction);
    if (!memorySaving) {
        // In the full-storage mode the cost can be evaluated up front; in
        // memory-saving mode it is evaluated lazily inside the backward loop.
        CI_LOG_D("evaluate cost function");
        for (int t = 1; t <= numSteps; ++t) {
            if (worker && worker->isInterrupted()) return 0;
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t], backwardStates[t], input);
            timer.stop();
            if (statistics) statistics->costTime.push_back(timer.duration());
        }
    }

    //backward/adjoint
    CI_LOG_D("adjoint steps");
    for (int t=numSteps; t>0; --t) {
        if (worker && worker->isInterrupted()) return 0;
        CI_LOG_I("Timestep " << t);
        if (!memorySaving) {
            timer.start();
            performBackwardStep(
                forwardStates[t - 1], forwardStates[t],
                backwardStates[t], backwardStates[t - 1],
                adjointVariablesOut, input, precomputed,
                forwardStorages[t], adjStorage, settings,
                costFunction->getRequiredInput(), false);
            timer.stop();
            if (statistics) statistics->backwardTime.push_back(timer.duration());
        } else {
            // Ping-pong between the two allocated backward states (t%2 picks
            // the current one) so only two states are alive at any time.
            const int idxCurrent = t % 2;
            const int idxNext = 1 - idxCurrent;
            backwardStates[idxNext].reset();
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t], backwardStates[idxCurrent], input);
            timer.stop();
            if (statistics) statistics->costTime.push_back(timer.duration());
            timer.start();
            performBackwardStep(
                forwardStates[t - 1], forwardStates[t],
                backwardStates[idxCurrent], backwardStates[idxNext],
                adjointVariablesOut, input, precomputed,
                forwardStorages[0], adjStorage, settings,
                costFunction->getRequiredInput(), true);
            timer.stop();
            if (statistics) statistics->backwardTime.push_back(timer.duration());
        }
    }
    // Divide by the scale factor accumulated in the backward states to undo
    // the per-step adjoint rescaling.
    adjointVariablesOut *= 1.0 / backwardStates[0].scale_;

    //adjoint of precomputations (initial velocities)
    if (variables.optimizeInitialLinearVelocity_ || variables.optimizeInitialAngularVelocity_)
        adjointComputeInitialVelocity(input, settings.initialLinearVelocity_, settings.initialAngularVelocity_, backwardStates[0].adjVelocities_, adjointVariablesOut.adjInitialLinearVelocity, adjointVariablesOut.adjInitialAngularVelocity);

    //done
    return finalCost;
}

// Approximates the gradient of the cost function by forward finite
// differences: one extra full forward simulation per optimized scalar
// parameter, with a relative step of settings_.<var> * finiteDifferencesDelta.
// Primarily useful to validate computeGradient().
real AdjointSolver::computeGradientFiniteDifferences(
    const Input& input, const SoftBodySimulation3D::Settings& settings_,
    const InputVariables& variables, CostFunctionPtr costFunction,
    AdjointVariables& adjointVariablesOut, real finiteDifferencesDelta,
    BackgroundWorker2* worker, Statistics* statistics)
{
    adjointVariablesOut = { 0 };

    //update settings with the current state
    SoftBodySimulation3D::Settings settings = settings_;
    if (variables.optimizeGravity_) settings.gravity_ = variables.currentGravity_;
    if (variables.optimizeYoungsModulus_) settings.youngsModulus_ = variables.currentYoungsModulus_;
    if (variables.optimizePoissonRatio_) settings.poissonsRatio_ = variables.currentPoissonRatio_;
    if (variables.optimizeMass_) settings.mass_ = variables.currentMass_;
    if (variables.optimizeMassDamping_) settings.dampingAlpha_ = variables.currentMassDamping_;
    if (variables.optimizeStiffnessDamping_) settings.dampingBeta_ = variables.currentStiffnessDamping_;
    if (variables.optimizeInitialLinearVelocity_) settings.initialLinearVelocity_ = variables.currentInitialLinearVelocity_;
    if (variables.optimizeInitialAngularVelocity_) settings.initialAngularVelocity_ = variables.currentInitialAngularVelocity_;
    if (variables.optimizeGroundPlane_) settings.groundPlane_ = variables.currentGroundPlane_;
    settings.validate();
    if
(statistics) {
        statistics->numActiveNodes = input.numActiveNodes_;
        statistics->numEmptyNodes = input.grid_->getSize().prod() - input.numActiveNodes_;
        statistics->numActiveElements = input.numActiveCells_;
    }
    CudaTimer timer;

    //query number of timesteps
    int numSteps = costFunction->getNumSteps();
    CI_LOG_D(numSteps << " timesteps have to be computed");

    //allocations
    // Only two forward states are needed: the finite-difference evaluation
    // re-runs the forward simulation and keeps just the previous/current step.
    ForwardState forwardStates[2];
    forwardStates[0] = allocateForwardState(input);
    forwardStates[1] = allocateForwardState(input);
    ForwardStorage forwardStorage = allocateForwardStorage(input);
    PrecomputedValues precomputed = allocatePrecomputedValues(input);
    BackwardState backwardState = allocateBackwardState(input, costFunction->getRequiredInput());

    //single forward evaluation
    // Re-initializes all states, runs the complete forward simulation with
    // the given settings and returns the accumulated cost (0 on interruption).
    auto evaluate = [&](SoftBodySimulation3D::Settings settings2, Statistics* statistics2) -> real {
        settings2.validate();
        //precomputations + initial computations
        precomputed.lumpedMass_.setZero();
        precomputed.bodyForces_.setZero();
        precomputed.initialVelocity_.setZero();
        for (int i = 0; i < 2; ++i) {
            forwardStates[i].displacements_.setZero();
            forwardStates[i].gridDisplacements_.setZero();
            forwardStates[i].velocities_.setZero();
        }
        SoftBodyGrid3D::computeMassMatrix(input, settings2, precomputed.lumpedMass_);
        SoftBodyGrid3D::computeBodyForces(input, settings2, precomputed.bodyForces_);
        SoftBodyGrid3D::computeInitialVelocity(input, settings2, precomputed.initialVelocity_);
        forwardStates[0].velocities_.inplace() = precomputed.initialVelocity_;
        //forward + cost function
        real finalCost = 0;
        CostFunctionTmp costFunctionTmp = allocateCostFunctionTmp(input, costFunction);
        CI_LOG_D("forward steps + cost function");
        for (int t = 1; t <= numSteps; ++t) {
            if (worker && worker->isInterrupted()) return 0;
            CI_LOG_D("Timestep " << t);
            timer.start();
            // NOTE(review): the convergence flag is not checked here, unlike
            // in computeGradient() -- confirm this is intentional.
            bool converged = performForwardStep(
                forwardStates[(t-1)%2], forwardStates[t%2], forwardStorage,
                input, precomputed, settings2,
                costFunction->getRequiredInput(), true);
            timer.stop();
            if (statistics2) statistics2->forwardTime.push_back(timer.duration());
            timer.start();
            finalCost += evaluateCostFunction(
                costFunction, t - 1, costFunctionTmp,
                forwardStates[t % 2], backwardState, input);
            timer.stop();
            if (statistics2) statistics2->costTime.push_back(timer.duration());
        }
        return finalCost;
    };

    //evaluate current setting
    CI_LOG_D("main evaluation");
    real finalCost = evaluate(settings, statistics);

    //evaluate once for every parameter
    // Forward difference per parameter: perturb by a step relative to the
    // reference setting and divide the cost difference by that step.
    // NOTE(review): if settings_.<settingsVar> is zero, the step is zero and
    // the division below divides by zero -- confirm callers only enable
    // parameters with non-zero reference values.
#define EVALUATE_FD(optimVar, settingsVar, currentVar, adjVar) \
    if (variables.optimVar) { \
        CI_LOG_D("evaluate " CUMAT_STR(settingsVar)); \
        SoftBodySimulation3D::Settings settings2 = settings; \
        settings2.settingsVar = variables.currentVar + settings_.settingsVar * finiteDifferencesDelta; \
        real cost = evaluate(settings2, nullptr); \
        adjointVariablesOut. adjVar = (cost - finalCost) / (settings_.settingsVar * finiteDifferencesDelta); \
        std::cout << "Evaluate " << #optimVar << \
            ", x1=" << settings.settingsVar << ", x2=" << settings2.settingsVar << \
            ", c1=" << finalCost << ", c2=" << cost << \
            " -> grad=" << adjointVariablesOut.adjVar << std::endl; \
    }
    EVALUATE_FD(optimizeGravity_, gravity_.x, currentGravity_.x, adjGravity_.x);
    EVALUATE_FD(optimizeGravity_, gravity_.y, currentGravity_.y, adjGravity_.y);
    EVALUATE_FD(optimizeGravity_, gravity_.z, currentGravity_.z, adjGravity_.z);
    EVALUATE_FD(optimizeMassDamping_, dampingAlpha_, currentMassDamping_, adjMassDamping_);
    EVALUATE_FD(optimizeStiffnessDamping_, dampingBeta_, currentStiffnessDamping_, adjStiffnessDamping_);
    EVALUATE_FD(optimizePoissonRatio_, poissonsRatio_, currentPoissonRatio_, adjPoissonRatio_);
    //EVALUATE_FD(optimizeYoungsModulus_, youngsModulus_, currentYoungsModulus_, adjYoungsModulus_);
    // Young's modulus is expanded manually instead of via the macro.
    if (variables.optimizeYoungsModulus_) {
        CI_LOG_D("evaluate " CUMAT_STR(youngsModulus_));
        SoftBodySimulation3D::Settings settings2 = settings;
        settings2.youngsModulus_ = variables.currentYoungsModulus_ + settings_.youngsModulus_ * finiteDifferencesDelta;
        real cost = evaluate(settings2, nullptr);
        adjointVariablesOut.adjYoungsModulus_ = (cost - finalCost) / (settings_.youngsModulus_ * finiteDifferencesDelta);
        std::cout << "Evaluate " << "optimizeYoungsModulus_" <<
            ", x1=" << settings.youngsModulus_ << ", x2=" << settings2.youngsModulus_ <<
            ", c1=" << finalCost << ", c2=" << cost <<
            " -> grad=" << adjointVariablesOut.adjYoungsModulus_ << std::endl;
    };
#undef EVALUATE_FD

    //done
    return finalCost;
}

// Chain rule from the adjoints of the Lame coefficients (adjMu, adjLambda)
// back to Young's modulus k and Poisson ratio p; the partial derivatives
// below correspond to mu = k/(2(1+p)) and lambda = k*p/((1-2p)(1+p)).
// Accumulates (+=) into the output adjoints.
void AdjointSolver::adjointComputeMaterialParameters(
    double k, double p, double adjMu, double adjLambda,
    double& adjYoungOut, double& adjPoissonOut)
{
    adjYoungOut += adjMu * (1 / (2.*(1 + p))) + adjLambda * (p / ((1 - 2 * p)*(1 + p)));
    adjPoissonOut += adjMu * (-k / (2.*ar3d::utils::square(1 + p)))
        + adjLambda * (-((k*p) / ((1 - 2 * p)*ar3d::utils::square(1 + p)))
            + k / ((1 - 2 * p)*(1 + p))
            + (2 * k*p) / (ar3d::utils::square(1 - 2 * p)*(1 + p)));
}

// Default optimizer configuration; string-valued fields left empty mean
// "use the optimizer's built-in default".
AdjointSolver::Settings::Settings()
    : numIterations_(20), optimizer_(GRADIENT_DESCENT), memorySaving_(false), normalizeUnits_(true)
{
    gradientDescentSettings_.epsilon_ = "1e-7";
    gradientDescentSettings_.linearStepsize_ = "0.001";
    gradientDescentSettings_.maxStepsize_ = "";
    gradientDescentSettings_.minStepsize_ = "";
    rpropSettings_.epsilon_ = "1e-7";
    rpropSettings_.initialStepsize_ = "0.001";
    lbfgsSettings_.epsilon_ = "1e-7";
    lbfgsSettings_.past_ = 0;
    lbfgsSettings_.delta_ = "";
    lbfgsSettings_.lineSearchAlg_ = LbfgsSettings::Wolfe;
    lbfgsSettings_.linesearchMaxTrials_ = 2;
    lbfgsSettings_.linesearchMinStep_ = "";
    lbfgsSettings_.linesearchMaxStep_ = "";
}

AdjointSolver::GUI::GUI()
{
}

// Registers all tweakbar parameters for the adjoint solver. The visibility of
// per-optimizer and per-variable sub-parameters is kept in sync via updateFn
// and accessor callbacks.
// noInitialValues: if true, the "Initial ..." value widgets are not created.
void AdjointSolver::GUI::initParams(cinder::params::InterfaceGlRef params, const std::string& group, const bool noInitialValues)
{
    params_ = params;
    // Option strings used to toggle widget visibility.
    static const std::string t = "visible=true";
    static const std::string f = "visible=false";

    //GENERAL PARAMETERS
    params->addParam("AdjointSolver-NumIterations", &settings_.numIterations_)
        .group(group).label("Num Iterations").min(1);
    params->addParam("AdjointSolver-MemorySaving",
&settings_.memorySaving_)
        .group(group).label("Memory Saving")
        .optionsStr("help='False: save everything from the forward pass (fast, memory intense). True: only save minimal information, recompute more (slower, less memory)'");
    params->addParam("AdjointSolver-NormalizeUnits", &settings_.normalizeUnits_)
        .group(group).label("Normalize Units");
    std::vector<std::string> optimizerNames = { "Gradient Descent", "Rprop", "LBFGS" };
    // Selecting an optimizer shows its settings and hides the others'.
    params->addParam("AdjointSolver-Optimizer", optimizerNames, reinterpret_cast<int*>(&settings_.optimizer_))
        .group(group).label("Optimizer").updateFn([params, this]() {
        bool v = settings_.optimizer_ == Settings::GRADIENT_DESCENT;
        params->setOptions("AdjointSolver-GD-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-GD-LinearStepsize", v ? t : f);
        params->setOptions("AdjointSolver-GD-MaxStepsize", v ? t : f);
        params->setOptions("AdjointSolver-GD-MinStepsize", v ? t : f);
        v = settings_.optimizer_ == Settings::RPROP;
        params->setOptions("AdjointSolver-Rprop-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-Rprop-InitialStepsize", v ? t : f);
        v = settings_.optimizer_ == Settings::LBFGS;
        params->setOptions("AdjointSolver-LBFGS-Epsilon", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Past", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Delta", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-Algorithm", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMaxTrials", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMinStep", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchMaxStep", v ? t : f);
        params->setOptions("AdjointSolver-LBFGS-LinesearchTol", v ? t : f);
    });

    //OPTIMIZED VARIABLES
    // Each "Optimize X" checkbox toggles the visibility of its initial-value
    // widgets through the accessors' setter.
    params->addParam("AdjointSolver-OptimizeGravity", &settings_.variables_.optimizeGravity_)
        .group(group).label("Optimize Gravity").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeGravity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialGravityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialGravityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialGravityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeGravity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialGravityX", &settings_.variables_.currentGravity_.x)
            .group(group).label("Initial Gravity X").step(0.01f).visible(settings_.variables_.optimizeGravity_);
        params->addParam("AdjointSolver-InitialGravityY", &settings_.variables_.currentGravity_.y)
            .group(group).label("Initial Gravity Y").step(0.01f).visible(settings_.variables_.optimizeGravity_);
        params->addParam("AdjointSolver-InitialGravityZ", &settings_.variables_.currentGravity_.z)
            .group(group).label("Initial Gravity Z").step(0.01f).visible(settings_.variables_.optimizeGravity_);
    }
    params->addParam("AdjointSolver-OptimizeYoungsModulus", &settings_.variables_.optimizeYoungsModulus_)
        .group(group).label("Optimize Young's Modulus").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeYoungsModulus_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialYoungsModulus", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeYoungsModulus_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialYoungsModulus", &settings_.variables_.currentYoungsModulus_)
            .group(group).label("Initial Young's Modulus").step(0.01f).min(0).visible(settings_.variables_.optimizeYoungsModulus_);
    }
    params->addParam("AdjointSolver-OptimizePoissonRatio", &settings_.variables_.optimizePoissonRatio_)
        .group(group).label("Optimize Poisson Ratio").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizePoissonRatio_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialPoissonRatio", v ? t : f);
    }, [this]() { return settings_.variables_.optimizePoissonRatio_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialPoissonRatio", &settings_.variables_.currentPoissonRatio_)
            .group(group).label("Initial Poisson Ratio").step(0.001f).min(0.1f).max(0.49f).visible(settings_.variables_.optimizePoissonRatio_);
    }
    params->addParam("AdjointSolver-OptimizeMass", &settings_.variables_.optimizeMass_)
        .group(group).label("Optimize Mass").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeMass_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialMass", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeMass_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialMass", &settings_.variables_.currentMass_)
            .group(group).label("Initial Mass").step(0.01f).min(0.01f).visible(settings_.variables_.optimizeMass_);
    }
    params->addParam("AdjointSolver-OptimizeMassDamping", &settings_.variables_.optimizeMassDamping_)
        .group(group).label("Optimize Mass Damping").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeMassDamping_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialMassDamping", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeMassDamping_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialMassDamping", &settings_.variables_.currentMassDamping_)
            .group(group).label("Initial Mass Damping").step(0.001f).min(0.0f).visible(settings_.variables_.optimizeMassDamping_);
    }
    params->addParam("AdjointSolver-OptimizeStiffnessDamping", &settings_.variables_.optimizeStiffnessDamping_)
        .group(group).label("Optimize Stiffness Damping").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeStiffnessDamping_ = v;
        if (!noInitialValues) params->setOptions("AdjointSolver-InitialStiffnessDamping", v ? t : f);
    }, [this]() { return settings_.variables_.optimizeStiffnessDamping_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialStiffnessDamping", &settings_.variables_.currentStiffnessDamping_)
            .group(group).label("Initial Stiffness Damping").step(0.001f).min(0.0f).visible(settings_.variables_.optimizeStiffnessDamping_);
    }
    params->addParam("AdjointSolver-OptimizeInitialLinearVelocity", &settings_.variables_.optimizeInitialLinearVelocity_)
        .group(group).label("Optimize Initial Linear Velocity").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeInitialLinearVelocity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialLinearVelocityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialLinearVelocityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialLinearVelocityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeInitialLinearVelocity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialLinearVelocityX", &settings_.variables_.currentInitialLinearVelocity_.x)
            .group(group).label("Initial Linear Velocity X").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
        params->addParam("AdjointSolver-InitialLinearVelocityY", &settings_.variables_.currentInitialLinearVelocity_.y)
            .group(group).label("Initial Linear Velocity Y").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
        params->addParam("AdjointSolver-InitialLinearVelocityZ", &settings_.variables_.currentInitialLinearVelocity_.z)
            .group(group).label("Initial Linear Velocity Z").step(0.01f).visible(settings_.variables_.optimizeInitialLinearVelocity_);
    }
    params->addParam("AdjointSolver-OptimizeInitialAngularVelocity", &settings_.variables_.optimizeInitialAngularVelocity_)
        .group(group).label("Optimize Initial Angular Velocity").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeInitialAngularVelocity_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialAngularVelocityX", v ? t : f);
            params->setOptions("AdjointSolver-InitialAngularVelocityY", v ? t : f);
            params->setOptions("AdjointSolver-InitialAngularVelocityZ", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeInitialAngularVelocity_; });
    if (!noInitialValues) {
        params->addParam("AdjointSolver-InitialAngularVelocityX", &settings_.variables_.currentInitialAngularVelocity_.x)
            .group(group).label("Initial Angular Velocity X").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
        params->addParam("AdjointSolver-InitialAngularVelocityY", &settings_.variables_.currentInitialAngularVelocity_.y)
            .group(group).label("Initial Angular Velocity Y").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
        params->addParam("AdjointSolver-InitialAngularVelocityZ", &settings_.variables_.currentInitialAngularVelocity_.z)
            .group(group).label("Initial Angular Velocity Z").step(0.01f).visible(settings_.variables_.optimizeInitialAngularVelocity_);
    }
    params->addParam("AdjointSolver-OptimizeGroundPlane", &settings_.variables_.optimizeGroundPlane_)
        .group(group).label("Optimize Ground Plane").accessors(
            [params, this, noInitialValues](bool v) {
        settings_.variables_.optimizeGroundPlane_ = v;
        if (!noInitialValues) {
            params->setOptions("AdjointSolver-InitialGroundPlaneAngle", v ? t : f);
            params->setOptions("AdjointSolver-InitialGroundPlaneHeight", v ? t : f);
        }
    }, [this]() { return settings_.variables_.optimizeGroundPlane_; });
    if (!noInitialValues) {
        // The plane normal (x,y,z of the real4) is exposed as a direction
        // widget via the glm vec3 cast; w is the height/offset.
        params->addParam("AdjointSolver-InitialGroundPlaneAngle", reinterpret_cast<glm::tvec3<real, glm::highp>*>(&settings_.variables_.currentGroundPlane_.x))
            .group(group).label("Initial Ground Plane Angle").visible(settings_.variables_.optimizeGroundPlane_);
        params->addParam("AdjointSolver-InitialGroundPlaneHeight", &settings_.variables_.currentGroundPlane_.w)
            .group(group).label("Initial Ground Plane Height").step(0.01f).visible(settings_.variables_.optimizeGroundPlane_);
    }

    //OPTIMIZER SETTINGS
    params->addParam("AdjointSolver-GD-Epsilon", &settings_.gradientDescentSettings_.epsilon_)
        .group(group).label("GD: Epsilon").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-GD-LinearStepsize", &settings_.gradientDescentSettings_.linearStepsize_)
        .group(group).label("GD: Initial").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Initial step size. Leave empty for default value.'");
    params->addParam("AdjointSolver-GD-MaxStepsize", &settings_.gradientDescentSettings_.maxStepsize_)
        .group(group).label("GD: Max Stepsize").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Maximal step size. If empty, no restriction is applied'");
    params->addParam("AdjointSolver-GD-MinStepsize", &settings_.gradientDescentSettings_.minStepsize_)
        .group(group).label("GD: Min Stepsize").visible(settings_.optimizer_ == Settings::GRADIENT_DESCENT)
        .optionsStr("help='Minimal step size. If empty, no restriction is applied'");
    params->addParam("AdjointSolver-Rprop-Epsilon", &settings_.rpropSettings_.epsilon_)
        .group(group).label("Rprop: Epsilon").visible(settings_.optimizer_ == Settings::RPROP)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-Rprop-InitialStepsize", &settings_.rpropSettings_.initialStepsize_)
        .group(group).label("Rprop: Initial").visible(settings_.optimizer_ == Settings::RPROP)
        .optionsStr("help='Initial step size. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-Epsilon", &settings_.lbfgsSettings_.epsilon_)
        .group(group).label("LBFGS: Epsilon").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Terminates if the norm of the gradient falls below this epsilon. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-Past", &settings_.lbfgsSettings_.past_).min(0)
        .group(group).label("LBFGS: Past Distance").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Number of steps into the past for tests if the cost function reached a plateau. Set to zero to disable.'");
    params->addParam("AdjointSolver-LBFGS-Delta", &settings_.lbfgsSettings_.delta_)
        .group(group).label("LBFGS: Past Delta").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Tolerance for plateau termination criterion'");
    std::vector<std::string> lbfgsLinesearchAlgs = { "Armijo", "Wolfe", "StrongWolfe" };
    params->addParam("AdjointSolver-LBFGS-Algorithm", lbfgsLinesearchAlgs, reinterpret_cast<int*>(&settings_.lbfgsSettings_.lineSearchAlg_))
        .group(group).label("LBFGS: LS Algorithm").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='The linesearch algorithm used to find the best step size'");
    params->addParam("AdjointSolver-LBFGS-LinesearchMaxTrials", &settings_.lbfgsSettings_.linesearchMaxTrials_)
        .group(group).label("LBFGS: LS max trials").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='The maximal number of trials in the line search'");
    params->addParam("AdjointSolver-LBFGS-LinesearchMinStep", &settings_.lbfgsSettings_.linesearchMinStep_)
        .group(group).label("LBFGS: LS min step").visible(settings_.optimizer_ == Settings::LBFGS)
.optionsStr("help='Minimal step size in the line search step. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-LinesearchMaxStep", &settings_.lbfgsSettings_.linesearchMaxStep_)
        .group(group).label("LBFGS: LS max step").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Maximal step size in the line search step. Leave empty for default value.'");
    params->addParam("AdjointSolver-LBFGS-LinesearchTol", &settings_.lbfgsSettings_.linesearchTol_)
        .group(group).label("LBFGS: LS tolerance").visible(settings_.optimizer_ == Settings::LBFGS)
        .optionsStr("help='Tolerance in Armijo condition. Leave empty for default value.'");
}

// Restores the solver settings from the JSON node written by save().
// Children guarded by hasChild() (MemorySaving, NormalizeUnits, Rprop,
// initial velocities) are optional so older files still load.
// noInitialValues: if true, the initial parameter values are not read.
void AdjointSolver::GUI::load(const cinder::JsonTree& parent, bool noInitialValues)
{
    settings_.numIterations_ = parent.getValueForKey<int>("NumIterations");
    settings_.optimizer_ = Settings::ToOptimizer(parent.getValueForKey("Optimizer"));
    if (parent.hasChild("MemorySaving")) settings_.memorySaving_ = parent.getValueForKey<bool>("MemorySaving");
    if (parent.hasChild("NormalizeUnits")) settings_.normalizeUnits_ = parent.getValueForKey<bool>("NormalizeUnits");
    const cinder::JsonTree& gd = parent.getChild("GradientDescent");
    settings_.gradientDescentSettings_.epsilon_ = gd.getValueForKey("Epsilon");
    settings_.gradientDescentSettings_.linearStepsize_ = gd.getValueForKey("LinearStepsize");
    settings_.gradientDescentSettings_.maxStepsize_ = gd.getValueForKey("MaxStepsize");
    settings_.gradientDescentSettings_.minStepsize_ = gd.getValueForKey("MinStepsize");
    if (parent.hasChild("Rprop")) {
        const cinder::JsonTree& gd = parent.getChild("Rprop");
        settings_.rpropSettings_.epsilon_ = gd.getValueForKey("Epsilon");
        settings_.rpropSettings_.initialStepsize_ = gd.getValueForKey("InitialStepsize");
    }
    const cinder::JsonTree& lbfgs = parent.getChild("LBFGS");
    settings_.lbfgsSettings_.epsilon_ = lbfgs.getValueForKey("Epsilon");
    settings_.lbfgsSettings_.past_ = lbfgs.getValueForKey<int>("Past");
    settings_.lbfgsSettings_.delta_ = lbfgs.getValueForKey("Delta");
    settings_.lbfgsSettings_.lineSearchAlg_ = Settings::LbfgsSettings::ToLineSearchAlg(lbfgs.getValueForKey("LineSearchAlg"));
    settings_.lbfgsSettings_.linesearchMaxTrials_ = lbfgs.getValueForKey<int>("LineSearchMaxTrials");
    settings_.lbfgsSettings_.linesearchMinStep_ = lbfgs.getValueForKey("LineSearchMinStep");
    settings_.lbfgsSettings_.linesearchMaxStep_ = lbfgs.getValueForKey("LineSearchMaxStep");
    settings_.lbfgsSettings_.linesearchTol_ = lbfgs.getValueForKey("LineSearchTol");
    const cinder::JsonTree& input = parent.getChild("InitialValues");
    settings_.variables_.optimizeGravity_ = input.getValueForKey<bool>("OptimizeGravity");
    if (!noInitialValues) {
        settings_.variables_.currentGravity_.x = input.getChild("InitialGravity").getValueAtIndex<real>(0);
        settings_.variables_.currentGravity_.y = input.getChild("InitialGravity").getValueAtIndex<real>(1);
        settings_.variables_.currentGravity_.z = input.getChild("InitialGravity").getValueAtIndex<real>(2);
    }
    settings_.variables_.optimizeYoungsModulus_ = input.getValueForKey<bool>("OptimizeYoungsModulus");
    if (!noInitialValues) settings_.variables_.currentYoungsModulus_ = input.getValueForKey<real>("InitialYoungsModulus");
    settings_.variables_.optimizePoissonRatio_ = input.getValueForKey<bool>("OptimizePoissonRatio");
    if (!noInitialValues) settings_.variables_.currentPoissonRatio_ = input.getValueForKey<real>("InitialPoissonRatio");
    settings_.variables_.optimizeMass_ = input.getValueForKey<bool>("OptimizeMass");
    if (!noInitialValues) settings_.variables_.currentMass_ = input.getValueForKey<real>("InitialMass");
    settings_.variables_.optimizeMassDamping_ = input.getValueForKey<bool>("OptimizeMassDamping");
    if (!noInitialValues) settings_.variables_.currentMassDamping_ = input.getValueForKey<real>("InitialMassDamping");
    settings_.variables_.optimizeStiffnessDamping_ = input.getValueForKey<bool>("OptimizeStiffnessDamping");
    if (!noInitialValues) settings_.variables_.currentStiffnessDamping_ = input.getValueForKey<real>("InitialStiffnessDamping");
    if (input.hasChild("OptimizeInitialLinearVelocity"))
        settings_.variables_.optimizeInitialLinearVelocity_ = input.getValueForKey<bool>("OptimizeInitialLinearVelocity");
    if (!noInitialValues && input.hasChild("InitialLinearVelocity")) {
        settings_.variables_.currentInitialLinearVelocity_.x = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(0);
        settings_.variables_.currentInitialLinearVelocity_.y = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(1);
        settings_.variables_.currentInitialLinearVelocity_.z = input.getChild("InitialLinearVelocity").getValueAtIndex<real>(2);
    }
    if (input.hasChild("OptimizeInitialAngularVelocity"))
        settings_.variables_.optimizeInitialAngularVelocity_ = input.getValueForKey<bool>("OptimizeInitialAngularVelocity");
    if (!noInitialValues && input.hasChild("InitialAngularVelocity")) {
        settings_.variables_.currentInitialAngularVelocity_.x = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(0);
        settings_.variables_.currentInitialAngularVelocity_.y = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(1);
        settings_.variables_.currentInitialAngularVelocity_.z = input.getChild("InitialAngularVelocity").getValueAtIndex<real>(2);
    }
    settings_.variables_.optimizeGroundPlane_ = input.getValueForKey<bool>("OptimizeGroundPlane");
    if (!noInitialValues) {
        settings_.variables_.currentGroundPlane_.x = input.getChild("InitialGroundPlane").getValueAtIndex<real>(0);
        settings_.variables_.currentGroundPlane_.y = input.getChild("InitialGroundPlane").getValueAtIndex<real>(1);
        settings_.variables_.currentGroundPlane_.z = input.getChild("InitialGroundPlane").getValueAtIndex<real>(2);
        settings_.variables_.currentGroundPlane_.w = input.getChild("InitialGroundPlane").getValueAtIndex<real>(3);
    }
    // Sync the tweakbar widget visibility with the loaded state.
    // NOTE(review): unlike initParams' updateFn, the Rprop options are not
    // toggled here -- confirm whether that omission is intentional.
    if (params_) {
        static const std::string t = "visible=true";
        static const std::string f = "visible=false";
        bool v = settings_.optimizer_ == Settings::GRADIENT_DESCENT;
        params_->setOptions("AdjointSolver-GD-Epsilon", v ? t : f);
        params_->setOptions("AdjointSolver-GD-LinearStepsize", v ? t : f);
        params_->setOptions("AdjointSolver-GD-MaxStepsize", v ? t : f);
        params_->setOptions("AdjointSolver-GD-MinStepsize", v ? t : f);
        v = settings_.optimizer_ == Settings::LBFGS;
        params_->setOptions("AdjointSolver-LBFGS-Epsilon", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-Past", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-Delta", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-Algorithm", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-LinesearchMaxTrials", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-LinesearchMinStep", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-LinesearchMaxStep", v ? t : f);
        params_->setOptions("AdjointSolver-LBFGS-LinesearchTol", v ? t : f);
        if (!noInitialValues) {
            params_->setOptions("AdjointSolver-InitialGravityX", settings_.variables_.optimizeGravity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialGravityY", settings_.variables_.optimizeGravity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialGravityZ", settings_.variables_.optimizeGravity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialYoungsModulus", settings_.variables_.optimizeYoungsModulus_ ? t : f);
            params_->setOptions("AdjointSolver-InitialPoissonRatio", settings_.variables_.optimizePoissonRatio_ ? t : f);
            params_->setOptions("AdjointSolver-InitialMass", settings_.variables_.optimizeMass_ ? t : f);
            params_->setOptions("AdjointSolver-InitialMassDamping", settings_.variables_.optimizeMassDamping_ ? t : f);
            params_->setOptions("AdjointSolver-InitialStiffnessDamping", settings_.variables_.optimizeStiffnessDamping_ ? t : f);
            params_->setOptions("AdjointSolver-InitialLinearVelocityX", settings_.variables_.optimizeInitialLinearVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialLinearVelocityY", settings_.variables_.optimizeInitialLinearVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialLinearVelocityZ", settings_.variables_.optimizeInitialLinearVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialAngularVelocityX", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialAngularVelocityY", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialAngularVelocityZ", settings_.variables_.optimizeInitialAngularVelocity_ ? t : f);
            params_->setOptions("AdjointSolver-InitialGroundPlaneAngle", settings_.variables_.optimizeGroundPlane_ ? t : f);
            params_->setOptions("AdjointSolver-InitialGroundPlaneHeight", settings_.variables_.optimizeGroundPlane_ ? t : f);
        }
    }
}

// Serializes the solver settings into JSON; inverse of load().
// noInitialValues: if true, the initial parameter values are not written.
void AdjointSolver::GUI::save(cinder::JsonTree& parent, bool noInitialValues) const
{
    parent.addChild(cinder::JsonTree("NumIterations", settings_.numIterations_));
    parent.addChild(cinder::JsonTree("Optimizer", Settings::FromOptimizer(settings_.optimizer_)));
    parent.addChild(cinder::JsonTree("MemorySaving", settings_.memorySaving_));
    parent.addChild(cinder::JsonTree("NormalizeUnits", settings_.normalizeUnits_));
    cinder::JsonTree gd = cinder::JsonTree::makeObject("GradientDescent");
    gd.addChild(cinder::JsonTree("Epsilon", settings_.gradientDescentSettings_.epsilon_));
    gd.addChild(cinder::JsonTree("LinearStepsize", settings_.gradientDescentSettings_.linearStepsize_));
    gd.addChild(cinder::JsonTree("MaxStepsize", settings_.gradientDescentSettings_.maxStepsize_));
    gd.addChild(cinder::JsonTree("MinStepsize", settings_.gradientDescentSettings_.minStepsize_));
    parent.addChild(gd);
    cinder::JsonTree rprop = cinder::JsonTree::makeObject("Rprop");
    rprop.addChild(cinder::JsonTree("Epsilon", settings_.rpropSettings_.epsilon_));
    rprop.addChild(cinder::JsonTree("InitialStepsize", settings_.rpropSettings_.initialStepsize_));
    parent.addChild(rprop);
    cinder::JsonTree lbfgs = cinder::JsonTree::makeObject("LBFGS");
    lbfgs.addChild(cinder::JsonTree("Epsilon", settings_.lbfgsSettings_.epsilon_));
    lbfgs.addChild(cinder::JsonTree("Past", settings_.lbfgsSettings_.past_));
    lbfgs.addChild(cinder::JsonTree("Delta", settings_.lbfgsSettings_.delta_));
    lbfgs.addChild(cinder::JsonTree("LineSearchAlg", Settings::LbfgsSettings::FromLineSearchAlg(settings_.lbfgsSettings_.lineSearchAlg_)));
    lbfgs.addChild(cinder::JsonTree("LineSearchMaxTrials", settings_.lbfgsSettings_.linesearchMaxTrials_));
    lbfgs.addChild(cinder::JsonTree("LineSearchMinStep", settings_.lbfgsSettings_.linesearchMinStep_));
    lbfgs.addChild(cinder::JsonTree("LineSearchMaxStep", settings_.lbfgsSettings_.linesearchMaxStep_));
    lbfgs.addChild(cinder::JsonTree("LineSearchTol", settings_.lbfgsSettings_.linesearchTol_));
    parent.addChild(lbfgs);
    cinder::JsonTree input = cinder::JsonTree::makeObject("InitialValues");
    input.addChild(cinder::JsonTree("OptimizeGravity", settings_.variables_.optimizeGravity_));
    if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialGravity")
        .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.x))
        .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.y))
        .addChild(cinder::JsonTree("", settings_.variables_.currentGravity_.z)));
    input.addChild(cinder::JsonTree("OptimizeYoungsModulus", settings_.variables_.optimizeYoungsModulus_));
    if (!noInitialValues) input.addChild(cinder::JsonTree("InitialYoungsModulus", settings_.variables_.currentYoungsModulus_));
    input.addChild(cinder::JsonTree("OptimizePoissonRatio", settings_.variables_.optimizePoissonRatio_));
    if (!noInitialValues) input.addChild(cinder::JsonTree("InitialPoissonRatio", settings_.variables_.currentPoissonRatio_));
    input.addChild(cinder::JsonTree("OptimizeMass", settings_.variables_.optimizeMass_));
    if (!noInitialValues) input.addChild(cinder::JsonTree("InitialMass", settings_.variables_.currentMass_));
    input.addChild(cinder::JsonTree("OptimizeMassDamping", settings_.variables_.optimizeMassDamping_));
    if (!noInitialValues)
input.addChild(cinder::JsonTree("InitialMassDamping", settings_.variables_.currentMassDamping_)); input.addChild(cinder::JsonTree("OptimizeStiffnessDamping", settings_.variables_.optimizeStiffnessDamping_)); if (!noInitialValues) input.addChild(cinder::JsonTree("InitialStiffnessDamping", settings_.variables_.currentStiffnessDamping_)); input.addChild(cinder::JsonTree("OptimizeInitialLinearVelocity", settings_.variables_.optimizeInitialLinearVelocity_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialLinearVelocity") .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialLinearVelocity_.z))); input.addChild(cinder::JsonTree("OptimizeInitialAngularVelocity", settings_.variables_.optimizeInitialAngularVelocity_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialAngularVelocity") .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentInitialAngularVelocity_.z))); input.addChild(cinder::JsonTree("OptimizeGroundPlane", settings_.variables_.optimizeGroundPlane_)); if (!noInitialValues) input.addChild(cinder::JsonTree::makeArray("InitialGroundPlane") .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.x)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.y)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.z)) .addChild(cinder::JsonTree("", settings_.variables_.currentGroundPlane_.w))); parent.addChild(input); } AdjointSolver::AdjointSolver(SimulationResults3DPtr reference, const Settings& settings, CostFunctionPtr costFunction) : reference_(reference) , settings_(settings) , costFunction_(costFunction) { 
reference_->input_.assertSizes(); reference_->settings_.validate(); } bool AdjointSolver::solve(const Callback_t& callback, BackgroundWorker2* worker) { //helper functions typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vec; const static auto packInputVariables = [](const InputVariables& var) -> Vec { std::vector<double> params; if (var.optimizeGravity_) { params.push_back(var.currentGravity_.x); params.push_back(var.currentGravity_.y); params.push_back(var.currentGravity_.z); } if (var.optimizeYoungsModulus_) params.push_back(var.currentYoungsModulus_); if (var.optimizePoissonRatio_) params.push_back(var.currentPoissonRatio_); if (var.optimizeMass_) params.push_back(var.currentMass_); if (var.optimizeMassDamping_) params.push_back(var.currentMassDamping_); if (var.optimizeStiffnessDamping_) params.push_back(var.currentStiffnessDamping_); if (var.optimizeInitialLinearVelocity_) { params.push_back(var.currentInitialLinearVelocity_.x); params.push_back(var.currentInitialLinearVelocity_.y); params.push_back(var.currentInitialLinearVelocity_.z); } if (var.optimizeInitialAngularVelocity_) { params.push_back(var.currentInitialAngularVelocity_.x); params.push_back(var.currentInitialAngularVelocity_.y); params.push_back(var.currentInitialAngularVelocity_.z); } if (var.optimizeGroundPlane_) { const real4 spherical = CoordinateTransformation::cartesian2spherical(var.currentGroundPlane_); params.push_back(spherical.y); params.push_back(spherical.z); params.push_back(var.currentGroundPlane_.w); } if (params.empty()) return Vec(); Vec result = Eigen::Map<Vec>(params.data(), params.size()); return result; }; const static auto packMinMax = [](const InputVariables& var) -> std::pair<Vec, Vec> { std::vector<double> min, max; static const double BIG = 1e10; static const double SMALL = 1e-10; if (var.optimizeGravity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeYoungsModulus_) { 
min.push_back(1); max.push_back(BIG); } if (var.optimizePoissonRatio_) { min.push_back(0.01); max.push_back(0.49); } if (var.optimizeMass_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeMassDamping_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeStiffnessDamping_) { min.push_back(SMALL); max.push_back(BIG); } if (var.optimizeInitialLinearVelocity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeInitialAngularVelocity_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } if (var.optimizeGroundPlane_) { min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); min.push_back(-BIG); max.push_back(BIG); } Vec minVec = Eigen::Map<Vec>(min.data(), min.size()); Vec maxVec = Eigen::Map<Vec>(max.data(), max.size()); return std::make_pair(minVec, maxVec); }; const static auto unpackInputVariables = [](const InputVariables& ref, const Vec& params) -> InputVariables { InputVariables var; int i = 0; if (ref.optimizeGravity_) { var.optimizeGravity_ = true; var.currentGravity_.x = static_cast<real>(params[i++]); var.currentGravity_.y = static_cast<real>(params[i++]); var.currentGravity_.z = static_cast<real>(params[i++]); } if (ref.optimizeYoungsModulus_) { var.optimizeYoungsModulus_ = true; var.currentYoungsModulus_ = static_cast<real>(params[i++]); } if (ref.optimizePoissonRatio_) { var.optimizePoissonRatio_ = true; var.currentPoissonRatio_ = static_cast<real>(params[i++]); } if (ref.optimizeMass_) { var.optimizeMass_ = true; var.currentMass_ = static_cast<real>(params[i++]); } if (ref.optimizeMassDamping_) { var.optimizeMassDamping_ = true; var.currentMassDamping_ = static_cast<real>(params[i++]); } if (ref.optimizeStiffnessDamping_) { var.optimizeStiffnessDamping_ = true; var.currentStiffnessDamping_ = static_cast<real>(params[i++]); } if 
(ref.optimizeInitialLinearVelocity_) { var.optimizeInitialLinearVelocity_ = true; var.currentInitialLinearVelocity_.x = static_cast<real>(params[i++]); var.currentInitialLinearVelocity_.y = static_cast<real>(params[i++]); var.currentInitialLinearVelocity_.z = static_cast<real>(params[i++]); } if (ref.optimizeInitialAngularVelocity_) { var.optimizeInitialAngularVelocity_ = true; var.currentInitialAngularVelocity_.x = static_cast<real>(params[i++]); var.currentInitialAngularVelocity_.y = static_cast<real>(params[i++]); var.currentInitialAngularVelocity_.z = static_cast<real>(params[i++]); } if (ref.optimizeGroundPlane_) { double theta = params[i++]; double phi = params[i++]; const double3 spherical = make_double3(1, theta, phi); const double3 cartesian = CoordinateTransformation::spherical2cartesian(spherical); var.optimizeGroundPlane_ = true; var.currentGroundPlane_.x = static_cast<real>(cartesian.x); var.currentGroundPlane_.y = static_cast<real>(cartesian.y); var.currentGroundPlane_.z = static_cast<real>(cartesian.z); var.currentGroundPlane_.w = static_cast<real>(params[i++]); } return var; }; static const auto packGradient = [](const InputVariables& ref, const InputVariables& in, const AdjointVariables& adj) -> Vec { std::vector<double> params; if (ref.optimizeGravity_) { params.push_back(adj.adjGravity_.x); params.push_back(adj.adjGravity_.y); params.push_back(adj.adjGravity_.z); } if (ref.optimizeYoungsModulus_) params.push_back(adj.adjYoungsModulus_); if (ref.optimizePoissonRatio_) params.push_back(adj.adjPoissonRatio_); if (ref.optimizeMass_) params.push_back(adj.adjMass_); if (ref.optimizeMassDamping_) params.push_back(adj.adjMassDamping_); if (ref.optimizeStiffnessDamping_) params.push_back(adj.adjStiffnessDamping_); if (ref.optimizeInitialLinearVelocity_) { params.push_back(adj.adjInitialLinearVelocity.x); params.push_back(adj.adjInitialLinearVelocity.y); params.push_back(adj.adjInitialLinearVelocity.z); } if (ref.optimizeInitialAngularVelocity_) { 
params.push_back(adj.adjInitialAngularVelocity.x); params.push_back(adj.adjInitialAngularVelocity.y); params.push_back(adj.adjInitialAngularVelocity.z); } if (ref.optimizeGroundPlane_) { const double4 spherical = CoordinateTransformation::cartesian2spherical(make_double4(in.currentGroundPlane_.x, in.currentGroundPlane_.y, in.currentGroundPlane_.z, 0)); const double4 adjSpherical = CoordinateTransformation::spherical2cartesianAdjoint(spherical, adj.adjGroundPlane_); params.push_back(adjSpherical.y); params.push_back(adjSpherical.z); params.push_back(adj.adjGroundPlane_.w); } Vec result = Eigen::Map<Vec>(params.data(), params.size()); return result; }; static const auto varToSettings = [](const InputVariables& var) -> SoftBodySimulation3D::Settings { SoftBodySimulation3D::Settings settings; settings.gravity_ = var.currentGravity_; settings.youngsModulus_ = var.currentYoungsModulus_; settings.poissonsRatio_ = var.currentPoissonRatio_; settings.mass_ = var.currentMass_; settings.dampingAlpha_ = var.currentMassDamping_; settings.dampingBeta_ = var.currentStiffnessDamping_; settings.initialLinearVelocity_ = var.currentInitialLinearVelocity_; settings.initialAngularVelocity_ = var.currentInitialAngularVelocity_; settings.groundPlane_ = var.currentGroundPlane_; return settings; }; static const auto toDouble = [](const std::string& str, double def) -> double { try { double val = std::stod(str); return val; } catch (const std::invalid_argument& ex) { return def; } }; //initialize statistics Statistics statistics; //Prepare initial values Vec initial = packInputVariables(settings_.variables_); Vec min, max; std::tie(min, max) = packMinMax(settings_.variables_); //callback(varToSettings(settings_.variables_), 0); //Initialize scaling Vec paramScaling = Vec::Ones(initial.size()); Vec paramScalingInv = Vec::Ones(initial.size()); if (settings_.normalizeUnits_) { int i = 0; if (settings_.variables_.optimizeGravity_) { real scale = std::max({ real(1e-8), 
abs(settings_.variables_.currentGravity_.x), abs(settings_.variables_.currentGravity_.y), abs(settings_.variables_.currentGravity_.z) }); paramScalingInv[i] = paramScalingInv[i + 1] = paramScalingInv[i + 2] = scale; i += 3; } if (settings_.variables_.optimizeYoungsModulus_) paramScalingInv[i++] = settings_.variables_.currentYoungsModulus_; if (settings_.variables_.optimizePoissonRatio_) paramScalingInv[i++] = settings_.variables_.currentPoissonRatio_; if (settings_.variables_.optimizeMass_) paramScalingInv[i++] = settings_.variables_.currentMass_; if (settings_.variables_.optimizeMassDamping_) paramScalingInv[i++] = std::max(real(1e-5), settings_.variables_.currentMassDamping_); if (settings_.variables_.optimizeStiffnessDamping_) paramScalingInv[i++] = std::max(real(1e-5), settings_.variables_.currentStiffnessDamping_); if (settings_.variables_.optimizeInitialLinearVelocity_) { paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.x)); paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.y)); paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialLinearVelocity_.z)); } if (settings_.variables_.optimizeInitialAngularVelocity_) { paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.x)); paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.y)); paramScalingInv[i++] = std::max(real(1), abs(settings_.variables_.currentInitialAngularVelocity_.z)); } if (settings_.variables_.optimizeGroundPlane_) { const real4 spherical = CoordinateTransformation::cartesian2spherical(settings_.variables_.currentGroundPlane_); real scale = std::max({ real(1e-8), spherical.y, spherical.z }); paramScalingInv[i] = paramScalingInv[i + 1] = scale; paramScalingInv[i + 2] = std::max(real(1), settings_.variables_.currentGroundPlane_.w); i += 3; } paramScaling = paramScalingInv.cwiseInverse(); 
CI_LOG_I("parameter scaling: " << paramScalingInv.transpose()); } if (settings_.optimizer_ == Settings::GRADIENT_DESCENT) { //Gradient descent real finalCost = 0; const auto fun = [this, &finalCost, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs) -> Vec { const Vec x = xs.cwiseProduct(paramScalingInv); CI_LOG_I("X: " << x.transpose()); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); Vec gradient = packGradient(this->settings_.variables_, var, adj); CI_LOG_I("GradientDescent-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj << " (" << gradient.transpose() << ")"); return gradient.cwiseProduct(paramScalingInv); }; ar::GradientDescent<Vec> gd(initial.cwiseProduct(paramScaling).eval(), fun); gd.setEpsilon(toDouble(settings_.gradientDescentSettings_.epsilon_, 1e-15)); gd.setLinearStepsize(toDouble(settings_.gradientDescentSettings_.linearStepsize_, 0.001)); gd.setMaxStepsize(toDouble(settings_.gradientDescentSettings_.maxStepsize_, 1e20)); gd.setMinStepsize(toDouble(settings_.gradientDescentSettings_.epsilon_, 0)); gd.setMinValues(min.cwiseProduct(paramScaling)); gd.setMaxValues(max.cwiseProduct(paramScaling)); for (int oi = 0; oi < settings_.numIterations_; ++oi) { worker->setStatus(tfm::format("Adjoint: optimization %d/%d", (oi + 1), settings_.numIterations_)); if (gd.step()) break; if (worker->isInterrupted()) break; //fetch intermediate this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, gd.getCurrentSolution().cwiseProduct(paramScalingInv))); auto gradient = 
varToSettings(unpackInputVariables(settings_.variables_, gd.getCurrentGradient().cwiseProduct(paramScaling))); callback(this->finalVariables_, gradient, this->finalCost_); } //fetch final output this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, gd.getCurrentSolution().cwiseProduct(paramScalingInv))); } else if (settings_.optimizer_ == Settings::RPROP) { //RProp (Resilient Back Propagation) Gradient descent real finalCost = 0; const auto fun = [this, &finalCost, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs) -> Vec { const Vec x = xs.cwiseProduct(paramScalingInv); CI_LOG_I("X: " << x.transpose()); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); Vec gradient = packGradient(this->settings_.variables_, var, adj); CI_LOG_I("Rprop-GradientDescent-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj << " (" << gradient.transpose() << ")"); return gradient.cwiseProduct(paramScalingInv); }; ar::RpropGradientDescent<Vec> rprop(initial.cwiseProduct(paramScaling).eval(), fun); rprop.setEpsilon(toDouble(settings_.rpropSettings_.epsilon_, 1e-15)); rprop.setInitialStepsize(toDouble(settings_.rpropSettings_.initialStepsize_, 0.001)); rprop.setMinValues(min.cwiseProduct(paramScaling)); rprop.setMaxValues(max.cwiseProduct(paramScaling)); for (int oi = 0; oi < settings_.numIterations_; ++oi) { worker->setStatus(tfm::format("Adjoint: optimization %d/%d", (oi + 1), settings_.numIterations_)); auto currentVariables = varToSettings(unpackInputVariables(settings_.variables_, 
rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); if (rprop.step()) break; if (worker->isInterrupted()) break; //fetch intermediate this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); auto gradient = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentGradient().cwiseProduct(paramScaling))); callback(currentVariables, gradient, this->finalCost_); } //fetch final output this->finalCost_ = finalCost; this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, rprop.getCurrentSolution().cwiseProduct(paramScalingInv))); } else if (settings_.optimizer_ == Settings::LBFGS) { //LBFGS LBFGSpp::LBFGSParam<double> params; params.epsilon = toDouble(settings_.lbfgsSettings_.epsilon_, params.epsilon); params.past = settings_.lbfgsSettings_.past_; params.delta = toDouble(settings_.lbfgsSettings_.delta_, params.delta); params.linesearch = settings_.lbfgsSettings_.lineSearchAlg_ == Settings::LbfgsSettings::Armijo ? LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_ARMIJO : settings_.lbfgsSettings_.lineSearchAlg_ == Settings::LbfgsSettings::Wolfe ? 
LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_WOLFE : LBFGSpp::LINE_SEARCH_ALGORITHM::LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE; params.max_linesearch = settings_.lbfgsSettings_.linesearchMaxTrials_; params.min_step = toDouble(settings_.lbfgsSettings_.linesearchMinStep_, params.min_step); params.max_step = toDouble(settings_.lbfgsSettings_.linesearchMaxStep_, params.max_step); params.ftol = toDouble(settings_.lbfgsSettings_.linesearchTol_, params.ftol); params.max_iterations = settings_.numIterations_; LBFGSpp::LBFGSSolver<double> lbfgs(params); LBFGSpp::LBFGSSolver<double>::ObjectiveFunction_t fun = [this, &statistics, &paramScaling, &paramScalingInv, &worker](const Vec& xs, Vec& gradient) -> double { const Vec x = xs.cwiseProduct(paramScalingInv); InputVariables var = unpackInputVariables(this->settings_.variables_, x); AdjointVariables adj = { 0 }; double finalCost; if (isUseAdjoint()) finalCost = computeGradient( reference_->input_, reference_->settings_, var, costFunction_, adj, settings_.memorySaving_, worker, &statistics); else finalCost = computeGradientFiniteDifferences( reference_->input_, reference_->settings_, var, costFunction_, adj, finiteDifferencesDelta_, worker, &statistics); gradient = packGradient(this->settings_.variables_, var, adj).cwiseProduct(paramScalingInv); CI_LOG_I("LBFGS-Step:\n cost=" << finalCost << "\n values:" << var << "\n gradient:" << adj); return finalCost; }; LBFGSpp::LBFGSSolver<double>::CallbackFunction_t lbfgsCallback = [this, worker, callback, &paramScaling, &paramScalingInv](const Vec& x, const Vec& g, const double& v, int k) -> bool { worker->setStatus(tfm::format("Adjoint: optimization %d/%d, cost %f", k + 1, settings_.numIterations_, v)); InputVariables var = unpackInputVariables(this->settings_.variables_, x.cwiseProduct(paramScalingInv)); auto gradient = varToSettings(unpackInputVariables(this->settings_.variables_, g.cwiseProduct(paramScaling))); callback(varToSettings(var), gradient, 
static_cast<real>(v)); return !worker->isInterrupted(); }; LBFGSpp::LBFGSSolver<double>::ValidationFunction_t validation = [&min, &max, &paramScaling](const Vec& x) -> bool { return (x.array() >= min.cwiseProduct(paramScaling).array()).all() && (x.array() <= max.cwiseProduct(paramScaling).array()).all(); }; double finalCost = 0; Vec value = initial.cwiseProduct(paramScaling); try { int oi = lbfgs.minimize(fun, value, finalCost, lbfgsCallback, validation); CI_LOG_I("Optimized for " << oi << " iterations, final cost " << finalCost); } catch (const std::runtime_error& error) { CI_LOG_EXCEPTION("LBFGS failed", error); return false; } //fetch final output this->finalCost_ = static_cast<real>(finalCost); this->finalVariables_ = varToSettings(unpackInputVariables(settings_.variables_, value.cwiseProduct(paramScalingInv))); } SoftBodySimulation3D::Settings finalGradient; memset(&finalGradient, 0, sizeof(SoftBodySimulation3D::Settings)); callback(this->finalVariables_, finalGradient, this->finalCost_); CI_LOG_I("Result: cost = " << finalCost_ << ", values:" << this->finalVariables_); CI_LOG_I(statistics); return true; } void AdjointSolver::testGradient(BackgroundWorker2* worker) { static const int numSteps = 15; static const int halfNumSteps = numSteps / 2; static const real frac = 0.5; CI_LOG_I("TEST GRADIENT"); #define GRADIENT(OPT, VAR1, VAR2, VARADJ, NAME) \ if (settings_.variables_.OPT && !worker->isInterrupted()) \ { \ InputVariables variables; \ variables.OPT = true; \ const real minValue = reference_->settings_.VAR1==0 ? -halfNumSteps : reference_->settings_.VAR1 * frac; \ const real maxValue = reference_->settings_.VAR1==0 ? 
+halfNumSteps : 2*reference_->settings_.VAR1 - minValue; \
typedef std::array<double, 3> entry; \
std::vector<entry> entries; \
for (int i=0; i<numSteps && !worker->isInterrupted(); ++i) \
{ \
worker->setStatus(tinyformat::format("%s gradient %d/%d", NAME, i+1, numSteps)); \
const real value = minValue + (maxValue - minValue) * i / (numSteps - 1); \
variables.VAR2 = value; \
AdjointVariables adj = { 0 }; \
const real cost = computeGradient( \
reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); \
entries.push_back(entry{ value, cost, adj.VARADJ }); \
} \
std::stringstream ss; \
ss << NAME << ":" << std::endl; \
ss << " Value Cost Gradient" << std::endl; \
for (int i=0; i<entries.size(); ++i) \
{ \
ss \
<< " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] \
<< " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] \
<< " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] \
<< std::endl; \
} \
CI_LOG_I(ss.str()); \
}
// One 1-D sweep per optimizable scalar component (x/z gravity left disabled).
GRADIENT(optimizeMass_, mass_, currentMass_, adjMass_, "Mass");
//GRADIENT(optimizeGravity_, gravity_.x, currentGravity_.x, adjGravity_.x, "GravityX");
GRADIENT(optimizeGravity_, gravity_.y, currentGravity_.y, adjGravity_.y, "GravityY");
//GRADIENT(optimizeGravity_, gravity_.z, currentGravity_.z, adjGravity_.z, "GravityZ");
GRADIENT(optimizeMassDamping_, dampingAlpha_, currentMassDamping_, adjMassDamping_, "DampingMass");
GRADIENT(optimizeStiffnessDamping_, dampingBeta_, currentStiffnessDamping_, adjStiffnessDamping_, "DampingStiffness");
GRADIENT(optimizeYoungsModulus_, youngsModulus_, currentYoungsModulus_, adjYoungsModulus_, "Young's Modulus");
GRADIENT(optimizePoissonRatio_, poissonsRatio_, currentPoissonRatio_, adjPoissonRatio_, "Poisson Ratio");
GRADIENT(optimizeInitialLinearVelocity_, initialLinearVelocity_.x, currentInitialLinearVelocity_.x, adjInitialLinearVelocity.x, "LinearVelocityX");
GRADIENT(optimizeInitialLinearVelocity_,
initialLinearVelocity_.y, currentInitialLinearVelocity_.y, adjInitialLinearVelocity.y, "LinearVelocityY");
GRADIENT(optimizeInitialLinearVelocity_, initialLinearVelocity_.z, currentInitialLinearVelocity_.z, adjInitialLinearVelocity.z, "LinearVelocityZ");
GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.x, currentInitialAngularVelocity_.x, adjInitialAngularVelocity.x, "AngularVelocityX");
GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.y, currentInitialAngularVelocity_.y, adjInitialAngularVelocity.y, "AngularVelocityY");
GRADIENT(optimizeInitialAngularVelocity_, initialAngularVelocity_.z, currentInitialAngularVelocity_.z, adjInitialAngularVelocity.z, "AngularVelocityZ");
#undef GRADIENT
// young's modulus and poisson's ratio in uniform
// 2-D sweep over (Young's modulus, Poisson ratio), logging cost and both
// gradient components at each grid point.
if (settings_.variables_.optimizeYoungsModulus_ && settings_.variables_.optimizePoissonRatio_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizePoissonRatio_ = true; variables.optimizeYoungsModulus_ = true; const real minYoungValue = reference_->settings_.youngsModulus_ * frac; const real maxYoungValue = 2 * reference_->settings_.youngsModulus_ - minYoungValue; const real minPoissonValue = reference_->settings_.poissonsRatio_ * frac; const real maxPoissonValue = 2 * reference_->settings_.poissonsRatio_ - minPoissonValue; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "Young+Poisson", i + 1, numSteps)); const real young = minYoungValue + (maxYoungValue - minYoungValue) * i / (numSteps - 1); const real poisson = minPoissonValue + (maxPoissonValue - minPoissonValue) * j / (numSteps - 1); variables.currentYoungsModulus_ = young; variables.currentPoissonRatio_ = poisson; AdjointVariables adj = {0}; const real cost = computeGradient( reference_->input_, reference_->settings_,
variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{young, poisson, cost, adj.adjYoungsModulus_, adj.adjPoissonRatio_}); } std::stringstream ss; ss << "Young's Modulus and Poisson's Ratio:" << std::endl; ss << " YoungModulus PoissonRatio Cost GradientYoung GradientPoisson" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); }
// stiffness and mass damping in uniform
// 2-D sweep over (mass damping alpha, stiffness damping beta).
if (settings_.variables_.optimizeMassDamping_ && settings_.variables_.optimizeStiffnessDamping_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeMassDamping_ = true; variables.optimizeStiffnessDamping_ = true; const real minMassValue = reference_->settings_.dampingAlpha_ * frac; const real maxMassValue = 2 * reference_->settings_.dampingAlpha_ - minMassValue; const real minStiffnessValue = reference_->settings_.dampingBeta_ * frac; const real maxStiffnessValue = 2 * reference_->settings_.dampingBeta_ - minStiffnessValue; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "AllDamping", i + 1, numSteps)); const real mass = minMassValue + (maxMassValue - minMassValue) * i / (numSteps - 1); const real stiffness = minStiffnessValue + (maxStiffnessValue - minStiffnessValue) * j / (numSteps - 1); variables.currentMassDamping_ = mass; variables.currentStiffnessDamping_ = stiffness; AdjointVariables adj = { 0 }; const real cost =
computeGradient( reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{ mass, stiffness, cost, adj.adjMassDamping_, adj.adjStiffnessDamping_ }); } std::stringstream ss; ss << "Mass Damping and Stiffness Damping:" << std::endl; ss << " Mass-Damping Stiffness-Damping Cost Gradient-Mass Gradient-Stiffness" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); }
//Ground Plane
// 2-D sweep over the ground-plane orientation angles (theta, phi) around
// the reference plane's spherical coordinates; the gradient is mapped from
// cartesian to spherical via spherical2cartesianAdjoint.
// NOTE(review): the CI_LOG_V argument below starts with a bare '<<' — this
// looks like an extraction/transcription artifact of the original logging
// statement; verify against the original source before compiling.
if (settings_.variables_.optimizeGroundPlane_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeGroundPlane_ = true; const real referenceHeight = reference_->settings_.groundPlane_.w; const real referenceTheta = CoordinateTransformation::cartesian2spherical(reference_->settings_.groundPlane_).y; const real referencePhi = CoordinateTransformation::cartesian2spherical(reference_->settings_.groundPlane_).z; const real minTheta = referenceTheta - frac * M_PI * 0.5; const real maxTheta = referenceTheta + frac * M_PI * 0.5; const real minPhi = referencePhi - frac * M_PI * 0.5; const real maxPhi = referencePhi + frac * M_PI * 0.5; typedef std::array<double, 5> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) for (int j = 0; j < numSteps && !worker->isInterrupted(); ++j) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "GroundPlaneOrientation", i + 1, numSteps)); const real theta = minTheta + (maxTheta - minTheta) * i / (numSteps - 1); const real phi = minPhi + (maxPhi - minPhi) * j / (numSteps - 1);
variables.currentGroundPlane_ = CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)); variables.currentGroundPlane_.w = referenceHeight; AdjointVariables adj = { 0 }; const real cost = computeGradient( reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); double4 adjSpherical = CoordinateTransformation::spherical2cartesianAdjoint(make_double4(1, theta, phi, 0), adj.adjGroundPlane_); entries.push_back(entry{ theta, phi, cost, adjSpherical.y, adjSpherical.z }); CI_LOG_V( << " " << std::fixed << std::setw(12) << std::setprecision(7) << theta << " " << std::fixed << std::setw(12) << std::setprecision(7) << phi << " " << std::fixed << std::setw(12) << std::setprecision(7) << cost << " " << std::fixed << std::setw(12) << std::setprecision(7) << adjSpherical.y << " " << std::fixed << std::setw(12) << std::setprecision(7) << adjSpherical.z << " (" << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).x << ", " << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).y << ", " << CoordinateTransformation::spherical2cartesian(make_real3(1, theta, phi)).z << ")" ); } std::stringstream ss; ss << "Ground Plane Orientation:" << std::endl; ss << " Theta Phi Cost Gradient-Theta Gradient-Phi" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][3] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][4] << std::endl; } CI_LOG_I(ss.str()); }
// 1-D sweep over the ground-plane height (w component).
if (settings_.variables_.optimizeGroundPlane_ && !worker->isInterrupted()) { InputVariables variables; variables.optimizeGroundPlane_ = true; const real minValue = reference_->settings_.groundPlane_.w -
0.1; const real maxValue = 2 * reference_->settings_.groundPlane_.w + 0.1; typedef std::array<double, 3> entry; std::vector<entry> entries; for (int i = 0; i < numSteps && !worker->isInterrupted(); ++i) { worker->setStatus(tinyformat::format("%s gradient %d/%d", "Ground Height", i + 1, numSteps)); const real value = minValue + (maxValue - minValue) * i / (numSteps - 1); variables.currentGroundPlane_ = reference_->settings_.groundPlane_; variables.currentGroundPlane_.w = value; AdjointVariables adj = { 0 }; const real cost = computeGradient(reference_->input_, reference_->settings_, variables, costFunction_, adj, settings_.memorySaving_); entries.push_back(entry{ value, cost, adj.adjGroundPlane_.w }); } std::stringstream ss; ss << "Ground Height" << ":" << std::endl; ss << " Value Cost Gradient" << std::endl; for (int i = 0; i < entries.size(); ++i) { ss << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][0] << " " << std::fixed << std::setw(12) << std::setprecision(7) << entries[i][1] << " " << std:: fixed << std::setw(12) << std::setprecision(7) << entries[i][2] << std::endl; } CI_LOG_I(ss.str()); } CI_LOG_I("DONE"); }
// ---------------------------------------------------------------------------
// Stream output for AdjointVariables: prints only the nonzero adjoint
// components, one " Name=value" segment each.
// (The definition continues beyond this chunk.)
// ---------------------------------------------------------------------------
std::ostream& operator<<(std::ostream& os, const AdjointSolver::AdjointVariables& obj) { if (obj.adjGravity_.x != 0 || obj.adjGravity_.y != 0 || obj.adjGravity_.z != 0) os << " Gravity=(" << obj.adjGravity_.x << "," << obj.adjGravity_.y << "," << obj.adjGravity_.z << ")"; if (obj.adjYoungsModulus_ != 0) os << " YoungsModulus=" << obj.adjYoungsModulus_; if (obj.adjPoissonRatio_ != 0) os << " PoissonRatio=" << obj.adjPoissonRatio_; if (obj.adjMass_ != 0) os << " Mass=" << obj.adjMass_; if (obj.adjMassDamping_ != 0) os << " MassDamping=" << obj.adjMassDamping_; if (obj.adjStiffnessDamping_ != 0) os << " StiffnessDamping=" << obj.adjStiffnessDamping_; if (obj.adjInitialLinearVelocity.x != 0 || obj.adjInitialLinearVelocity.y != 0 || obj.adjInitialLinearVelocity.z != 0) os << " InitialLinearVelocity=(" <<
obj.adjInitialLinearVelocity.x << "," << obj.adjInitialLinearVelocity.y << "," << obj.adjInitialLinearVelocity.z << ")"; if (obj.adjInitialAngularVelocity.x != 0 || obj.adjInitialAngularVelocity.y != 0 || obj.adjInitialAngularVelocity.z != 0) os << " InitialAngularVelocity=(" << obj.adjInitialAngularVelocity.x << "," << obj.adjInitialAngularVelocity.y << "," << obj.adjInitialAngularVelocity.z << ")"; if (obj.adjGroundPlane_.x != 0 || obj.adjGroundPlane_.y != 0 || obj.adjGroundPlane_.z != 0 || obj.adjGroundPlane_.w != 0) os << " GroundPlane=(" << obj.adjGroundPlane_.x << "," << obj.adjGroundPlane_.y << "," << obj.adjGroundPlane_.z << "," << obj.adjGroundPlane_.w << ")"; return os; } std::ostream& operator<<(std::ostream& os, const AdjointSolver::InputVariables& obj) { if (obj.optimizeGravity_) os << " Gravity=(" << obj.currentGravity_.x << "," << obj.currentGravity_.y << "," << obj.currentGravity_.z << ")"; if (obj.optimizeYoungsModulus_) os << " YoungsModulus=" << obj.currentYoungsModulus_; if (obj.optimizePoissonRatio_) os << " PoissonRatio=" << obj.currentPoissonRatio_; if (obj.optimizeMass_) os << " Mass=" << obj.currentMass_; if (obj.optimizeMassDamping_) os << " MassDamping=" << obj.currentMassDamping_; if (obj.optimizeStiffnessDamping_) os << " StiffnessDamping=" << obj.currentStiffnessDamping_; if (obj.optimizeInitialLinearVelocity_) os << " InitialLinearVelocity=(" << obj.currentInitialLinearVelocity_.x << "," << obj.currentInitialLinearVelocity_.y << "," << obj.currentInitialLinearVelocity_.z << ")"; if (obj.optimizeInitialAngularVelocity_) os << " InitialAngularVelocity=(" << obj.currentInitialAngularVelocity_.x << "," << obj.currentInitialAngularVelocity_.y << "," << obj.currentInitialAngularVelocity_.z << ")"; if (obj.optimizeGroundPlane_) os << " GroundPlane=(" << obj.currentGroundPlane_.x << "," << obj.currentGroundPlane_.y << "," << obj.currentGroundPlane_.z << "," << obj.currentGroundPlane_.w << ")"; return os; } }
3dad13a1408c2d42a519adf47b4eb71284ae4ae6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include <stdlib.h> #include <GL/freeglut.h> #define DIM 512 #define blockSize 8 #define blurRadius 6 #define effectiveBlockSize (blockSize+2*blurRadius) float sourceColors[DIM*DIM]; texture<float,2> blurDevTex; float *sourceDevPtr; float *transDevPtr; float *blurDevPtr; float readBackPixels[DIM*DIM]; int timer = 0; int mode = 0; void keyboard(unsigned char key, int x, int y) { if(key == '1') mode = 0; else if(key == '2') mode = 1; else if(key == '3') mode = 2; else if(key == '4') mode = 3; } __global__ void animateKernel( float *sourcePtr, float *targetPtr, int time) { int index = 0; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * gridDim.x; int transX = X; transX += time%DIM; if(transX >= DIM) transX -= DIM; int transIndex = transX + Y * blockDim.x * gridDim.x; targetPtr[index] = sourcePtr[transIndex]; // simple copy } __global__ void blurKernelGlobal( float *sourcePtr, float *targetPtr) { // filterwidth = 51 - time 109ms int index = 0; int filterWidth = blurRadius*2+1; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * gridDim.x; float value = 0.0f; int upperLeftFilterPosX = X - blurRadius; int upperLeftFilterPosY = Y - blurRadius; for(int i = upperLeftFilterPosX; i<upperLeftFilterPosX+filterWidth; ++i) { for(int j = upperLeftFilterPosY; j<upperLeftFilterPosY+filterWidth; ++j) { if( i < DIM && j < DIM && i >= 0 && j >= 0) { int sampleIndex = i + j * blockDim.x * gridDim.x; value += sourcePtr[sampleIndex]; } } } value /= filterWidth*filterWidth; targetPtr[index] = value; } __global__ void blurKernelTexture(float *targetPtr) { // filterwidth = 51 - time 98ms int index = 0; int filterWidth = blurRadius*2+1; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * 
blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * gridDim.x; float value = 0.0f; int upperLeftFilterPosX = X - blurRadius; int upperLeftFilterPosY = Y - blurRadius; for(int i = upperLeftFilterPosY; i<upperLeftFilterPosY+filterWidth; ++i) { for(int j = upperLeftFilterPosX; j<upperLeftFilterPosX+filterWidth; ++j) { if( i < DIM && j < DIM && i >= 0 && j >= 0) { value += tex2D(blurDevTex,j,i); } } } value /= filterWidth*filterWidth; targetPtr[index] = value; } __global__ void blurKernelShared(float *sourcePtr, float *targetPtr) { // calculate the position in source Image // therefore use blockSize not BlockDim.x int positionInImageX = blockIdx.x * blockSize + threadIdx.x - blurRadius; int positionInImageY = blockIdx.y * blockSize + threadIdx.y - blurRadius; __shared__ float cache[effectiveBlockSize * effectiveBlockSize]; // fill the with values from global memory int getterIndex = positionInImageX + positionInImageY * DIM; if(0 <= positionInImageX && positionInImageX < DIM && 0 <= positionInImageY && positionInImageY < DIM) cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = sourcePtr[getterIndex]; else cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = 0.0f; // synchronise all threads __syncthreads(); // let all kernels run which have enough neighbors for mean calculation int kernelSizeRightSide = effectiveBlockSize - blurRadius; if(threadIdx.x >= blurRadius && threadIdx.x < kernelSizeRightSide && threadIdx.y >= blurRadius && threadIdx.y < kernelSizeRightSide) { float value = 0; for(int i = -blurRadius; i <= blurRadius; i++) { for(int j = -blurRadius; j <= blurRadius; j++) { value += cache[(threadIdx.x + j) + (threadIdx.y + i) * effectiveBlockSize]; } } int filterWidth = blurRadius*2+1; value /= filterWidth*filterWidth; targetPtr[positionInImageX + positionInImageY * DIM] = value; } } void display(void) { glClearColor(0.0f, 0.0f, 0.0f, 1.0f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // TODO: 
Transformationskernel auf sourceDevPtr anwenden dim3 grid(DIM/blockSize, DIM/blockSize); dim3 block(blockSize, blockSize); timer += 1; hipLaunchKernelGGL(( animateKernel), dim3(grid),dim3(block), 0, 0, sourceDevPtr, transDevPtr, timer); // TODO: Zeitmessung starten (see hipEventCreate, hipEventRecord) hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); // TODO: Kernel mit Blur-Filter ausfhren. int kernelSize = blurRadius*2+1; dim3 sharedGrid(DIM/blockSize, DIM/blockSize); dim3 sharedBlock(effectiveBlockSize, effectiveBlockSize); switch(mode) { case 0:hipLaunchKernelGGL(( animateKernel), dim3(grid),dim3(block), 0, 0, transDevPtr, blurDevPtr, timer); break; case 1:hipLaunchKernelGGL(( blurKernelGlobal), dim3(grid),dim3(block), 0, 0, transDevPtr, blurDevPtr); break; case 2:hipLaunchKernelGGL(( blurKernelTexture), dim3(grid),dim3(block), 0, 0, blurDevPtr); break; case 3:hipLaunchKernelGGL(( blurKernelShared), dim3(sharedGrid),dim3(sharedBlock), 0, 0, transDevPtr, blurDevPtr); break; } // TODO: Zeitmessung stoppen und fps ausgeben (see hipEventSynchronize, hipEventElapsedTime, hipEventDestroy) hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.1f ms \r", elapsedTime); hipEventDestroy(start); hipEventDestroy(stop); // Ergebnis zur CPU zuruecklesen CUDA_SAFE_CALL( hipMemcpy( readBackPixels, blurDevPtr, DIM*DIM*4, hipMemcpyDeviceToHost ) ); // Ergebnis zeichnen (ja, jetzt gehts direkt wieder zur GPU zurueck...) glDrawPixels( DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels ); glutSwapBuffers(); } // clean up memory allocated on the GPU void cleanup() { CUDA_SAFE_CALL( hipFree( sourceDevPtr ) ); // TODO: Aufrumen zustzlich angelegter Ressourcen. 
CUDA_SAFE_CALL( hipUnbindTexture(blurDevTex)); CUDA_SAFE_CALL( hipFree( transDevPtr ) ); CUDA_SAFE_CALL( hipFree( blurDevPtr ) ); } int main(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(DIM, DIM); glutCreateWindow("Memory Types"); glutKeyboardFunc(keyboard); glutIdleFunc(display); glutDisplayFunc(display); // mit Schachbrettmuster fllen for (int i = 0 ; i < DIM*DIM ; i++) { int x = (i % DIM) / (DIM/8); int y = (i / DIM) / (DIM/8); if ((x + y) % 2) sourceColors[i] = 1.0f; else sourceColors[i] = 0.0f; } // alloc memory on the GPU CUDA_SAFE_CALL( hipMalloc( (void**)&sourceDevPtr, DIM*DIM*4 ) ); CUDA_SAFE_CALL( hipMemcpy( sourceDevPtr, sourceColors, DIM*DIM*4, hipMemcpyHostToDevice ) ); // TODO: Weiteren Speicher auf der GPU fr das Bild nach der Transformation und nach dem Blur allokieren. CUDA_SAFE_CALL( hipMalloc( (void**)&transDevPtr, DIM*DIM*4 ) ); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); CUDA_SAFE_CALL( hipMalloc( (void**)&blurDevPtr, DIM*DIM*4 ) ); // TODO: Binding des Speichers des Bildes an eine Textur mittels hipBindTexture. CUDA_SAFE_CALL( hipBindTexture2D(NULL,blurDevTex,transDevPtr,desc,DIM,DIM,DIM*4)); glutMainLoop(); cleanup(); }
3dad13a1408c2d42a519adf47b4eb71284ae4ae6.cu
#include "common.h" #include <stdlib.h> #include <GL/freeglut.h> #define DIM 512 #define blockSize 8 #define blurRadius 6 #define effectiveBlockSize (blockSize+2*blurRadius) float sourceColors[DIM*DIM]; texture<float,2> blurDevTex; float *sourceDevPtr; float *transDevPtr; float *blurDevPtr; float readBackPixels[DIM*DIM]; int timer = 0; int mode = 0; void keyboard(unsigned char key, int x, int y) { if(key == '1') mode = 0; else if(key == '2') mode = 1; else if(key == '3') mode = 2; else if(key == '4') mode = 3; } __global__ void animateKernel( float *sourcePtr, float *targetPtr, int time) { int index = 0; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * gridDim.x; int transX = X; transX += time%DIM; if(transX >= DIM) transX -= DIM; int transIndex = transX + Y * blockDim.x * gridDim.x; targetPtr[index] = sourcePtr[transIndex]; // simple copy } __global__ void blurKernelGlobal( float *sourcePtr, float *targetPtr) { // filterwidth = 51 - time 109ms int index = 0; int filterWidth = blurRadius*2+1; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * gridDim.x; float value = 0.0f; int upperLeftFilterPosX = X - blurRadius; int upperLeftFilterPosY = Y - blurRadius; for(int i = upperLeftFilterPosX; i<upperLeftFilterPosX+filterWidth; ++i) { for(int j = upperLeftFilterPosY; j<upperLeftFilterPosY+filterWidth; ++j) { if( i < DIM && j < DIM && i >= 0 && j >= 0) { int sampleIndex = i + j * blockDim.x * gridDim.x; value += sourcePtr[sampleIndex]; } } } value /= filterWidth*filterWidth; targetPtr[index] = value; } __global__ void blurKernelTexture(float *targetPtr) { // filterwidth = 51 - time 98ms int index = 0; int filterWidth = blurRadius*2+1; // TODO: Index berechnen int X = threadIdx.x + blockIdx.x * blockDim.x; int Y = threadIdx.y + blockIdx.y * blockDim.y; index = X + Y * blockDim.x * 
gridDim.x; float value = 0.0f; int upperLeftFilterPosX = X - blurRadius; int upperLeftFilterPosY = Y - blurRadius; for(int i = upperLeftFilterPosY; i<upperLeftFilterPosY+filterWidth; ++i) { for(int j = upperLeftFilterPosX; j<upperLeftFilterPosX+filterWidth; ++j) { if( i < DIM && j < DIM && i >= 0 && j >= 0) { value += tex2D(blurDevTex,j,i); } } } value /= filterWidth*filterWidth; targetPtr[index] = value; } __global__ void blurKernelShared(float *sourcePtr, float *targetPtr) { // calculate the position in source Image // therefore use blockSize not BlockDim.x int positionInImageX = blockIdx.x * blockSize + threadIdx.x - blurRadius; int positionInImageY = blockIdx.y * blockSize + threadIdx.y - blurRadius; __shared__ float cache[effectiveBlockSize * effectiveBlockSize]; // fill the with values from global memory int getterIndex = positionInImageX + positionInImageY * DIM; if(0 <= positionInImageX && positionInImageX < DIM && 0 <= positionInImageY && positionInImageY < DIM) cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = sourcePtr[getterIndex]; else cache[threadIdx.x + threadIdx.y * effectiveBlockSize] = 0.0f; // synchronise all threads __syncthreads(); // let all kernels run which have enough neighbors for mean calculation int kernelSizeRightSide = effectiveBlockSize - blurRadius; if(threadIdx.x >= blurRadius && threadIdx.x < kernelSizeRightSide && threadIdx.y >= blurRadius && threadIdx.y < kernelSizeRightSide) { float value = 0; for(int i = -blurRadius; i <= blurRadius; i++) { for(int j = -blurRadius; j <= blurRadius; j++) { value += cache[(threadIdx.x + j) + (threadIdx.y + i) * effectiveBlockSize]; } } int filterWidth = blurRadius*2+1; value /= filterWidth*filterWidth; targetPtr[positionInImageX + positionInImageY * DIM] = value; } } void display(void) { glClearColor(0.0f, 0.0f, 0.0f, 1.0f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // TODO: Transformationskernel auf sourceDevPtr anwenden dim3 grid(DIM/blockSize, DIM/blockSize); dim3 
block(blockSize, blockSize); timer += 1; animateKernel<<<grid,block>>>(sourceDevPtr, transDevPtr, timer); // TODO: Zeitmessung starten (see cudaEventCreate, cudaEventRecord) cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); // TODO: Kernel mit Blur-Filter ausführen. int kernelSize = blurRadius*2+1; dim3 sharedGrid(DIM/blockSize, DIM/blockSize); dim3 sharedBlock(effectiveBlockSize, effectiveBlockSize); switch(mode) { case 0: animateKernel<<<grid,block>>>(transDevPtr, blurDevPtr, timer); break; case 1: blurKernelGlobal<<<grid,block>>>(transDevPtr, blurDevPtr); break; case 2: blurKernelTexture<<<grid,block>>>(blurDevPtr); break; case 3: blurKernelShared<<<sharedGrid,sharedBlock>>>(transDevPtr, blurDevPtr); break; } // TODO: Zeitmessung stoppen und fps ausgeben (see cudaEventSynchronize, cudaEventElapsedTime, cudaEventDestroy) cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %3.1f ms \r", elapsedTime); cudaEventDestroy(start); cudaEventDestroy(stop); // Ergebnis zur CPU zuruecklesen CUDA_SAFE_CALL( cudaMemcpy( readBackPixels, blurDevPtr, DIM*DIM*4, cudaMemcpyDeviceToHost ) ); // Ergebnis zeichnen (ja, jetzt gehts direkt wieder zur GPU zurueck...) glDrawPixels( DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels ); glutSwapBuffers(); } // clean up memory allocated on the GPU void cleanup() { CUDA_SAFE_CALL( cudaFree( sourceDevPtr ) ); // TODO: Aufräumen zusätzlich angelegter Ressourcen. 
CUDA_SAFE_CALL( cudaUnbindTexture(blurDevTex)); CUDA_SAFE_CALL( cudaFree( transDevPtr ) ); CUDA_SAFE_CALL( cudaFree( blurDevPtr ) ); } int main(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(DIM, DIM); glutCreateWindow("Memory Types"); glutKeyboardFunc(keyboard); glutIdleFunc(display); glutDisplayFunc(display); // mit Schachbrettmuster füllen for (int i = 0 ; i < DIM*DIM ; i++) { int x = (i % DIM) / (DIM/8); int y = (i / DIM) / (DIM/8); if ((x + y) % 2) sourceColors[i] = 1.0f; else sourceColors[i] = 0.0f; } // alloc memory on the GPU CUDA_SAFE_CALL( cudaMalloc( (void**)&sourceDevPtr, DIM*DIM*4 ) ); CUDA_SAFE_CALL( cudaMemcpy( sourceDevPtr, sourceColors, DIM*DIM*4, cudaMemcpyHostToDevice ) ); // TODO: Weiteren Speicher auf der GPU für das Bild nach der Transformation und nach dem Blur allokieren. CUDA_SAFE_CALL( cudaMalloc( (void**)&transDevPtr, DIM*DIM*4 ) ); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); CUDA_SAFE_CALL( cudaMalloc( (void**)&blurDevPtr, DIM*DIM*4 ) ); // TODO: Binding des Speichers des Bildes an eine Textur mittels cudaBindTexture. CUDA_SAFE_CALL( cudaBindTexture2D(NULL,blurDevTex,transDevPtr,desc,DIM,DIM,DIM*4)); glutMainLoop(); cleanup(); }
ba27db3c2165fdc0e68612aaefa99d8f170984ef.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <cstdlib> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include <ctime> #include <unistd.h> #include <sys/time.h> #include "common.h" using namespace std; double diffTime(timeval start, timeval end) { return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001; } int main(int argc, char* argv[]) { FILE* fd = fopen("gemm_config.in", "w"); if(fd == NULL) { printf("Cannot write to file gemm_config.in\n"); return 0; } struct hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); printf("Device %s\n", prop.name); const int batch_size = atoi(argv[1]); const int seq_len = atoi(argv[2]); const int head_num = atoi(argv[3]); const int size_per_head = atoi(argv[4]); const int gemm_num = 5; int M[gemm_num]; int N[gemm_num]; int K[gemm_num]; int batchCount[gemm_num] = {1,1,1,1,1}; char mess[gemm_num][256]; //gemm1 M[0] = batch_size * seq_len; K[0] = head_num * size_per_head; N[0] = K[0]; strcpy(mess[0], "from_tensor * weightQ/K/V, attr * output_kernel"); //gemm2 M[1] = M[0]; K[1] = K[0]; N[1] = 4 * N[0]; strcpy(mess[1], "attr_output * inter_kernel"); //gemm3 M[2] = M[0]; K[2] = 4 * K[0]; N[2] = N[0]; strcpy(mess[2], "inter_matmul * output_kernel"); M[3] = seq_len; N[3] = seq_len; K[3] = size_per_head; batchCount[3] = batch_size * head_num; strcpy(mess[3], 
"attention batched Gemm1"); M[4] = seq_len; N[4] = size_per_head; K[4] = seq_len; batchCount[4] = batch_size * head_num; strcpy(mess[4], "attention batched Gemm2"); hipblasHandle_t cublas_handle; hipblasCreate(&cublas_handle); typedef __half T; hipDataType AType = HIP_R_16F; hipDataType BType = HIP_R_16F; hipDataType CType = HIP_R_16F; hipDataType computeType = HIP_R_16F; const int ites = 100; struct timeval start, end; int startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; int endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; T alpha = (T)1.0f; T beta = (T)0.0f; printf("***FP16 Gemm Testing***\n"); for(int i = 0; i < gemm_num; ++i) { int m = M[i], n = N[i], k = K[i]; printf("\n-----------------------------\n"); printf("GEMM test %d: [M: %d, K: %d, N: %d] %s\n", i, m, k, n, mess[i]); T* d_A; T* d_B; T* d_C; check_cuda_error(hipMalloc((void**)&d_A, sizeof(T) * m * k * batchCount[i])); check_cuda_error(hipMalloc((void**)&d_B, sizeof(T) * k * n * batchCount[i])); check_cuda_error(hipMalloc((void**)&d_C, sizeof(T) * m * n * batchCount[i])); float exec_time = 99999.0f; int fast_algo = 0; for(int algo = startAlgo; algo <= endAlgo; algo++) { hipDeviceSynchronize(); gettimeofday(&start, NULL); for(int ite = 0; ite < ites; ++ite) { if(i < 3) { check_cuda_error(hipblasGemmEx(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, d_B, BType, n, d_A, AType, k, &beta, d_C, CType, n, computeType, static_cast<hipblasGemmAlgo_t>(algo))); } else if(i == 3) { check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, seq_len, seq_len, size_per_head, &alpha, d_B, BType, size_per_head, seq_len * size_per_head, d_A, AType, size_per_head, seq_len * size_per_head, &beta, d_C, CType, seq_len, seq_len * seq_len, batch_size * head_num, computeType, static_cast<hipblasGemmAlgo_t>(algo))); } else { check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size_per_head, seq_len, seq_len, &alpha, d_B, BType, size_per_head, seq_len * 
size_per_head, d_A, AType, seq_len, seq_len * seq_len, &beta, d_C, CType, size_per_head, seq_len * size_per_head, batch_size * head_num, computeType, static_cast<hipblasGemmAlgo_t>(algo))); } } hipDeviceSynchronize(); gettimeofday(&end, NULL); printf("algo_%d costs %.3fms \n", algo, diffTime(start, end) / ites); if(diffTime(start, end) / ites < exec_time) { exec_time = diffTime(start, end) / ites; fast_algo = algo; } } printf("fast_algo %d costs %.3f ms\n", fast_algo, exec_time); fprintf(fd, "%d\n", fast_algo); } }
ba27db3c2165fdc0e68612aaefa99d8f170984ef.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <cstdlib> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <ctime> #include <unistd.h> #include <sys/time.h> #include "common.h" using namespace std; double diffTime(timeval start, timeval end) { return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001; } int main(int argc, char* argv[]) { FILE* fd = fopen("gemm_config.in", "w"); if(fd == NULL) { printf("Cannot write to file gemm_config.in\n"); return 0; } struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); printf("Device %s\n", prop.name); const int batch_size = atoi(argv[1]); const int seq_len = atoi(argv[2]); const int head_num = atoi(argv[3]); const int size_per_head = atoi(argv[4]); const int gemm_num = 5; int M[gemm_num]; int N[gemm_num]; int K[gemm_num]; int batchCount[gemm_num] = {1,1,1,1,1}; char mess[gemm_num][256]; //gemm1 M[0] = batch_size * seq_len; K[0] = head_num * size_per_head; N[0] = K[0]; strcpy(mess[0], "from_tensor * weightQ/K/V, attr * output_kernel"); //gemm2 M[1] = M[0]; K[1] = K[0]; N[1] = 4 * N[0]; strcpy(mess[1], "attr_output * inter_kernel"); //gemm3 M[2] = M[0]; K[2] = 4 * K[0]; N[2] = N[0]; strcpy(mess[2], "inter_matmul * output_kernel"); M[3] = seq_len; N[3] = seq_len; K[3] = size_per_head; batchCount[3] = batch_size * head_num; strcpy(mess[3], "attention batched Gemm1"); M[4] = seq_len; N[4] = size_per_head; 
K[4] = seq_len; batchCount[4] = batch_size * head_num; strcpy(mess[4], "attention batched Gemm2"); cublasHandle_t cublas_handle; cublasCreate(&cublas_handle); typedef __half T; cudaDataType_t AType = CUDA_R_16F; cudaDataType_t BType = CUDA_R_16F; cudaDataType_t CType = CUDA_R_16F; cudaDataType_t computeType = CUDA_R_16F; const int ites = 100; struct timeval start, end; int startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; int endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; T alpha = (T)1.0f; T beta = (T)0.0f; printf("***FP16 Gemm Testing***\n"); for(int i = 0; i < gemm_num; ++i) { int m = M[i], n = N[i], k = K[i]; printf("\n-----------------------------\n"); printf("GEMM test %d: [M: %d, K: %d, N: %d] %s\n", i, m, k, n, mess[i]); T* d_A; T* d_B; T* d_C; check_cuda_error(cudaMalloc((void**)&d_A, sizeof(T) * m * k * batchCount[i])); check_cuda_error(cudaMalloc((void**)&d_B, sizeof(T) * k * n * batchCount[i])); check_cuda_error(cudaMalloc((void**)&d_C, sizeof(T) * m * n * batchCount[i])); float exec_time = 99999.0f; int fast_algo = 0; for(int algo = startAlgo; algo <= endAlgo; algo++) { cudaDeviceSynchronize(); gettimeofday(&start, NULL); for(int ite = 0; ite < ites; ++ite) { if(i < 3) { check_cuda_error(cublasGemmEx(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, d_B, BType, n, d_A, AType, k, &beta, d_C, CType, n, computeType, static_cast<cublasGemmAlgo_t>(algo))); } else if(i == 3) { check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, seq_len, seq_len, size_per_head, &alpha, d_B, BType, size_per_head, seq_len * size_per_head, d_A, AType, size_per_head, seq_len * size_per_head, &beta, d_C, CType, seq_len, seq_len * seq_len, batch_size * head_num, computeType, static_cast<cublasGemmAlgo_t>(algo))); } else { check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, size_per_head, seq_len, seq_len, &alpha, d_B, BType, size_per_head, seq_len * size_per_head, d_A, AType, seq_len, seq_len * seq_len, &beta, 
d_C, CType, size_per_head, seq_len * size_per_head, batch_size * head_num, computeType, static_cast<cublasGemmAlgo_t>(algo))); } } cudaDeviceSynchronize(); gettimeofday(&end, NULL); printf("algo_%d costs %.3fms \n", algo, diffTime(start, end) / ites); if(diffTime(start, end) / ites < exec_time) { exec_time = diffTime(start, end) / ites; fast_algo = algo; } } printf("fast_algo %d costs %.3f ms\n", fast_algo, exec_time); fprintf(fd, "%d\n", fast_algo); } }
8736a5244846181a0d74123487af1f2b774a08cc.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main (void) { int c; int *dev_c; checkCudaErrors(hipMalloc((void **) &dev_c, sizeof(int))); hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c); checkCudaErrors(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost)); printf("2 + 7 = %d\n", c); hipFree(dev_c); return 0; }
8736a5244846181a0d74123487af1f2b774a08cc.cu
// includes, system #include <stdlib.h> #include <stdio.h> // includes CUDA #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main (void) { int c; int *dev_c; checkCudaErrors(cudaMalloc((void **) &dev_c, sizeof(int))); add<<<1,1>>>(2, 7, dev_c); checkCudaErrors(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost)); printf("2 + 7 = %d\n", c); cudaFree(dev_c); return 0; }
fd7984b576fa36ca39a1a61d36d65b72e537ca15.hip
// !!! This is a file automatically generated by hipify!!! // TestMatrix.cpp : Defines the entry point for the console application. // #include "stdafx.h" #include <iostream> #include <fstream> #include <vector> #include <string> #include "FloatVector.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define PI 3.14159265 using namespace std; // Multiply a 3 x 3 Matrix size void multiply3x3(Float3 *firstMat, Float3 *secondMat, Float3 *outputMat) { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } cout << "Printing a 3 x 3 Matrix: " << endl; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { cout << outputMat[j].value[i] << " "; } cout << endl; } cout << endl; } // Multiply a 4 x 4 Matrix size void multiply4x4(Float4 *firstMat, Float4 *secondMat, Float4 *outputMat) { int i, j, k; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 4; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } //cout << "Printing a 4 x 4 Matrix: " << endl; //for (i = 0; i < 4; i++) { // for (j = 0; j < 4; j++) { // cout << outputMat[j].value[i] << " "; // } // cout << endl; //} //cout << endl; } // Multiply a n x m Matrix size, where you can define N and M void multiplynxm(Float4 *firstMat, Float4 *secondMat, Float4 *outputMat, int N, int M) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < M; j++) { for (k = 0; k < N; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } //cout << "Transformation Matrix X Location Matrix: " << endl; //for (i = 0; i < N; i++) { // for (j = 0; j < M; j++) { // cout << outputMat[j].value[i] << " "; // } // cout << endl; //} //cout << endl; } // Generate a 3 x 3 Transformation Matrix, where you input the rotation axis, coordinate and angle of rotation Float3 *gen3x3tm(float x, float y, float theta) { Float3 *a = new Float3[3](); 
Float3 *b = new Float3[3](); Float3 *c = new Float3[3](); Float3 *result1 = new Float3[3]; Float3 *result2 = new Float3[3]; a[0].value[0] = 1; a[1].value[1] = 1; a[2].value[0] = x; a[2].value[1] = y; a[2].value[2] = 1; b[0].value[0] = 1; b[1].value[1] = 1; b[2].value[0] = -x; b[2].value[1] = -y; b[2].value[2] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[1] = floor(sin(theta*PI / 180)); c[1].value[0] = floor(-sin(theta*PI / 180)); c[1].value[1] = floor(cos(theta*PI / 180)); c[2].value[2] = 1; multiply3x3(a, c, result1); multiply3x3(result1, b, result2); delete[](a); delete[](b); delete[](c); delete[](result1); return result2; } // Generate a 4 x 4 Transformation Matrix, where you input the rotation axis, coordinate and angle of rotation Float4 *gen4x4tm(float xCoor, float yCoor, float zCoor, char axis, float theta) { Float4 *a = new Float4[4](); Float4 *b = new Float4[4](); Float4 *c = new Float4[4](); Float4 *result1 = new Float4[4]; Float4 *result2 = new Float4[4]; a[0].value[0] = 1; a[1].value[1] = 1; a[2].value[2] = 1; a[3].value[0] = xCoor; a[3].value[1] = yCoor; a[3].value[2] = zCoor; a[3].value[3] = 1; b[0].value[0] = 1; b[1].value[1] = 1; b[2].value[2] = 1; b[3].value[3] = 1; c[3].value[3] = 1; // TEMPORARY FIX int OddEven = 1; if (OddEven == 1) { float Divisable = 32.0 / 2.0; //16 float Divisable1 = Divisable - 1.0; //15 if (axis == 'x') { c[0].value[0] = 1; c[1].value[1] = floor(cos(theta*PI / 180)); c[1].value[2] = floor(sin(theta*PI / 180)); c[2].value[1] = floor(-sin(theta*PI / 180)); c[2].value[2] = floor(cos(theta*PI / 180)); if (zCoor == Divisable1) // 15 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor - 1; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor + 1; b[3].value[2] = -zCoor; } } else if (zCoor == Divisable) // 16 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor - 1; b[3].value[2] = -zCoor; } else if 
(yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor + 1; } } } else if (axis == 'y') { c[1].value[1] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[2] = floor(-sin(theta*PI / 180)); c[2].value[0] = floor(sin(theta*PI / 180)); c[2].value[2] = floor(cos(theta*PI / 180)); if (xCoor == Divisable1) // 15 { if (zCoor == Divisable1) // 15 { b[3].value[0] = -xCoor - 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } else if (zCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor + 1; } } else if (xCoor == Divisable) // 16 { if (zCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor - 1; } else if (zCoor == Divisable) // 16 { b[3].value[0] = -xCoor + 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } } } else if (axis == 'z') { c[2].value[2] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[1] = floor(sin(theta*PI / 180)); c[1].value[0] = floor(-sin(theta*PI / 180)); c[1].value[1] = floor(cos(theta*PI / 180)); if (xCoor == Divisable1) // 15 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor - 1; b[3].value[2] = -zCoor; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor - 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } } else if (xCoor == Divisable) // 16 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor + 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor + 1; b[3].value[2] = -zCoor; } } } } cout << "Translation Matrix X Rotation Matrix: "; cout << endl; multiply4x4(a, c, result1); cout << "Rotation Matrix X Translation Matrix: "; cout << endl; multiply4x4(result1, b, result2); delete[](a); delete[](b); delete[](c); delete[](result1); return result2; } class Matrix { public: int numDivX = 32; int numDivY = 32; int numDivZ = 32; int voxelDataSize = numDivX * numDivY * 
numDivZ; float *voxelValue; float *TvoxelValue; Float4 *Coor; const char *fName = "toilet_0444.raw"; // Input .raw file void read(); void save(); void rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas); }; // Reads .raw file & create corresponding coordinate matrix for voxels void Matrix::read() { size_t size = voxelDataSize*sizeof(float); FILE *fp = fopen(fName, "rb"); if (!fp) { fprintf(stderr, "Error opening file '%s'\n", fName); abort(); } unsigned char *tempdata = new unsigned char[voxelDataSize]; size_t read = fread(tempdata, sizeof(unsigned char), voxelDataSize, fp); fclose(fp); printf("Read '%s', %d bytes\n", fName, read); this->voxelValue = new float[voxelDataSize]; for (int k = 0; k < voxelDataSize; k++) { this->voxelValue[k] = float(ceil(tempdata[k] / 254)); //cout << voxelValue[k] << " " << endl; } this->Coor = new Float4[voxelDataSize]; for (int i = 0; i < numDivZ; i++) { for (int j = 0; j < numDivY; j++) { for (int k = 0; k < numDivX; k++) { int marker = (numDivZ*numDivZ*i) + (numDivY*j) + k; Coor[marker].value[0] = k; Coor[marker].value[1] = j; Coor[marker].value[2] = i; Coor[marker].value[3] = 1; } } } delete[] tempdata; //cout << "Original voxel values: " << endl; //for (int p = 0; p < voxelDataSize; p++) //{ // cout << voxelValue[p] << " "; //} //cout << endl; } // Perform complete voxel rotation in 3D space void Matrix::rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas) { Float4 *Transformed = new Float4[voxelDataSize]; // Final transformed matrix stored here TvoxelValue = new float[voxelDataSize]; // Final voxel value stored here Float4 *TransMat = gen4x4tm(xDist, yDist, zDist, rotateAxis, thetas); // Obtain transformation matrix multiplynxm(TransMat, Coor, Transformed, 4, voxelDataSize); // matrix multiply to get Transformed for (int fin = 0; fin < voxelDataSize; fin++) // rotate voxelsx { int yes = (Transformed[fin].value[2] * numDivZ * numDivZ) + (Transformed[fin].value[1] * numDivY) + 
Transformed[fin].value[0]; this->TvoxelValue[yes] = voxelValue[fin]; } delete[](Transformed); delete[](TransMat); //cout << "Transformed voxel values: " << endl; //for (int p = 0; p < voxelDataSize; p++) //{ // cout << TvoxelValue[p] << " "; //} //cout << endl; } // Saves rotated voxels back into raw file void Matrix::save() { ofstream rawFile; string fName = "phi_grid.raw"; rawFile.open(fName, std::ofstream::binary); if (!rawFile.good()) { cerr << "Unable to open output file for writing" << endl; abort(); } char* phiOut = new char[voxelDataSize]; for (int k = 0; k < voxelDataSize; k++) { //if (GPU) // phiOut[k] = char(phiValGPU[k] * 255); //else phiOut[k] = char(TvoxelValue[k] * 255); } rawFile.write((char*)phiOut, voxelDataSize*sizeof(char)); delete[] phiOut; rawFile.close(); cout << "Voxel File Saved as : " << fName << endl << endl; } class Artificial { public: Float4 *place; int Xs = 3; int Ys = 3; int Zs = 3; int vSize = Xs * Ys * Zs; float *voxValue; float *TransformedVox; void address(); void save(); void rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas); }; void Artificial::address() { this->voxValue = new float[vSize](); // assign voxel value voxValue[0] = 1; voxValue[4] = 1; voxValue[5] = 1; voxValue[7] = 1; voxValue[13] = 1; voxValue[14] = 1; voxValue[16] = 1; voxValue[17] = 1; voxValue[22] = 1; cout << "Initialized voxel values: " << endl; for (int p = 0; p < vSize; p++) { cout << voxValue[p] << " "; } cout << endl; this->place = new Float4[vSize]; for (int i = 0; i < Zs; i++) { for (int j = 0; j < Ys; j++) { for (int k = 0; k < Xs; k++) { int marker = (Zs*Zs*i) + (Ys*j) + k; place[marker].value[0] = k; place[marker].value[1] = j; place[marker].value[2] = i; place[marker].value[3] = 1; } } } cout << "Initial Location Matrix: " << endl; for (int i = 0; i < 4; i++) { for (int j = 0; j < vSize; j++) { cout << place[j].value[i] << " "; } cout << endl; } cout << endl; } void Artificial::rotate(int xDist, int yDist, int zDist, char 
rotateAxis, int thetas) { Float4 *FinalT = new Float4[4]; // Final transformed matrix stored here TransformedVox = new float[vSize]; // Final voxel value stored here Float4 *TMat = gen4x4tm(xDist, yDist, zDist, rotateAxis, thetas); // Obtain transformation matrix multiplynxm(TMat, place, FinalT, 4, vSize); // matrix multiply to get Transformed for (int fin = 0; fin < vSize; fin++) // rotate voxels { int yes = (FinalT[fin].value[2] * Zs * Zs) + (FinalT[fin].value[1] * Ys) + FinalT[fin].value[0]; this->TransformedVox[yes] = voxValue[fin]; } delete[](FinalT); delete[](TMat); cout << "Transformed voxel values: " << endl; for (int p = 0; p < vSize; p++) { cout << TransformedVox[p] << " "; } cout << endl; } void Artificial::save() { ofstream rawFile; string fName = "aiyo.raw"; rawFile.open(fName, std::ofstream::binary); if (!rawFile.good()) { cerr << "Unable to open output file for writing" << endl; abort(); } char* phiOut = new char[vSize]; for (int k = 0; k < vSize; k++) { //if (GPU) // phiOut[k] = outputPhi(phiValGPU[k] * 255); //else //phiOut[k] = char(TransformedVox[k] * 255); phiOut[k] = char(voxValue[k] * 255); } rawFile.write((char*)phiOut, vSize*sizeof(char)); delete[] phiOut; rawFile.close(); cout << "Voxel File Saved as : " << fName << endl << endl; } int main() { //Matrix wow; //wow.read(); //wow.rotate(16, 16, 16, 'y', 90); //wow.save(); Artificial Testz; Testz.address(); Testz.rotate(1, 1, 1, 'y', 90); Testz.save(); return 0; }
fd7984b576fa36ca39a1a61d36d65b72e537ca15.cu
// TestMatrix.cpp : Defines the entry point for the console application. // #include "stdafx.h" #include <iostream> #include <fstream> #include <vector> #include <string> #include "FloatVector.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define PI 3.14159265 using namespace std; // Multiply a 3 x 3 Matrix size void multiply3x3(Float3 *firstMat, Float3 *secondMat, Float3 *outputMat) { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } cout << "Printing a 3 x 3 Matrix: " << endl; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { cout << outputMat[j].value[i] << " "; } cout << endl; } cout << endl; } // Multiply a 4 x 4 Matrix size void multiply4x4(Float4 *firstMat, Float4 *secondMat, Float4 *outputMat) { int i, j, k; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 4; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } //cout << "Printing a 4 x 4 Matrix: " << endl; //for (i = 0; i < 4; i++) { // for (j = 0; j < 4; j++) { // cout << outputMat[j].value[i] << " "; // } // cout << endl; //} //cout << endl; } // Multiply a n x m Matrix size, where you can define N and M void multiplynxm(Float4 *firstMat, Float4 *secondMat, Float4 *outputMat, int N, int M) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < M; j++) { for (k = 0; k < N; k++) outputMat[j].value[i] += firstMat[k].value[i] * secondMat[j].value[k]; } } //cout << "Transformation Matrix X Location Matrix: " << endl; //for (i = 0; i < N; i++) { // for (j = 0; j < M; j++) { // cout << outputMat[j].value[i] << " "; // } // cout << endl; //} //cout << endl; } // Generate a 3 x 3 Transformation Matrix, where you input the rotation axis, coordinate and angle of rotation Float3 *gen3x3tm(float x, float y, float theta) { Float3 *a = new Float3[3](); Float3 *b = new Float3[3](); Float3 *c = new Float3[3](); Float3 
*result1 = new Float3[3]; Float3 *result2 = new Float3[3]; a[0].value[0] = 1; a[1].value[1] = 1; a[2].value[0] = x; a[2].value[1] = y; a[2].value[2] = 1; b[0].value[0] = 1; b[1].value[1] = 1; b[2].value[0] = -x; b[2].value[1] = -y; b[2].value[2] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[1] = floor(sin(theta*PI / 180)); c[1].value[0] = floor(-sin(theta*PI / 180)); c[1].value[1] = floor(cos(theta*PI / 180)); c[2].value[2] = 1; multiply3x3(a, c, result1); multiply3x3(result1, b, result2); delete[](a); delete[](b); delete[](c); delete[](result1); return result2; } // Generate a 4 x 4 Transformation Matrix, where you input the rotation axis, coordinate and angle of rotation Float4 *gen4x4tm(float xCoor, float yCoor, float zCoor, char axis, float theta) { Float4 *a = new Float4[4](); Float4 *b = new Float4[4](); Float4 *c = new Float4[4](); Float4 *result1 = new Float4[4]; Float4 *result2 = new Float4[4]; a[0].value[0] = 1; a[1].value[1] = 1; a[2].value[2] = 1; a[3].value[0] = xCoor; a[3].value[1] = yCoor; a[3].value[2] = zCoor; a[3].value[3] = 1; b[0].value[0] = 1; b[1].value[1] = 1; b[2].value[2] = 1; b[3].value[3] = 1; c[3].value[3] = 1; // TEMPORARY FIX int OddEven = 1; if (OddEven == 1) { float Divisable = 32.0 / 2.0; //16 float Divisable1 = Divisable - 1.0; //15 if (axis == 'x') { c[0].value[0] = 1; c[1].value[1] = floor(cos(theta*PI / 180)); c[1].value[2] = floor(sin(theta*PI / 180)); c[2].value[1] = floor(-sin(theta*PI / 180)); c[2].value[2] = floor(cos(theta*PI / 180)); if (zCoor == Divisable1) // 15 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor - 1; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor + 1; b[3].value[2] = -zCoor; } } else if (zCoor == Divisable) // 16 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor - 1; b[3].value[2] = -zCoor; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] 
= -yCoor; b[3].value[2] = -zCoor + 1; } } } else if (axis == 'y') { c[1].value[1] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[2] = floor(-sin(theta*PI / 180)); c[2].value[0] = floor(sin(theta*PI / 180)); c[2].value[2] = floor(cos(theta*PI / 180)); if (xCoor == Divisable1) // 15 { if (zCoor == Divisable1) // 15 { b[3].value[0] = -xCoor - 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } else if (zCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor + 1; } } else if (xCoor == Divisable) // 16 { if (zCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor - 1; } else if (zCoor == Divisable) // 16 { b[3].value[0] = -xCoor + 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } } } else if (axis == 'z') { c[2].value[2] = 1; c[0].value[0] = floor(cos(theta*PI / 180)); c[0].value[1] = floor(sin(theta*PI / 180)); c[1].value[0] = floor(-sin(theta*PI / 180)); c[1].value[1] = floor(cos(theta*PI / 180)); if (xCoor == Divisable1) // 15 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor - 1; b[3].value[2] = -zCoor; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor - 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } } else if (xCoor == Divisable) // 16 { if (yCoor == Divisable1) // 15 { b[3].value[0] = -xCoor + 1; b[3].value[1] = -yCoor; b[3].value[2] = -zCoor; } else if (yCoor == Divisable) // 16 { b[3].value[0] = -xCoor; b[3].value[1] = -yCoor + 1; b[3].value[2] = -zCoor; } } } } cout << "Translation Matrix X Rotation Matrix: "; cout << endl; multiply4x4(a, c, result1); cout << "Rotation Matrix X Translation Matrix: "; cout << endl; multiply4x4(result1, b, result2); delete[](a); delete[](b); delete[](c); delete[](result1); return result2; } class Matrix { public: int numDivX = 32; int numDivY = 32; int numDivZ = 32; int voxelDataSize = numDivX * numDivY * numDivZ; float *voxelValue; float *TvoxelValue; Float4 *Coor; const char 
*fName = "toilet_0444.raw"; // Input .raw file void read(); void save(); void rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas); }; // Reads .raw file & create corresponding coordinate matrix for voxels void Matrix::read() { size_t size = voxelDataSize*sizeof(float); FILE *fp = fopen(fName, "rb"); if (!fp) { fprintf(stderr, "Error opening file '%s'\n", fName); abort(); } unsigned char *tempdata = new unsigned char[voxelDataSize]; size_t read = fread(tempdata, sizeof(unsigned char), voxelDataSize, fp); fclose(fp); printf("Read '%s', %d bytes\n", fName, read); this->voxelValue = new float[voxelDataSize]; for (int k = 0; k < voxelDataSize; k++) { this->voxelValue[k] = float(ceil(tempdata[k] / 254)); //cout << voxelValue[k] << " " << endl; } this->Coor = new Float4[voxelDataSize]; for (int i = 0; i < numDivZ; i++) { for (int j = 0; j < numDivY; j++) { for (int k = 0; k < numDivX; k++) { int marker = (numDivZ*numDivZ*i) + (numDivY*j) + k; Coor[marker].value[0] = k; Coor[marker].value[1] = j; Coor[marker].value[2] = i; Coor[marker].value[3] = 1; } } } delete[] tempdata; //cout << "Original voxel values: " << endl; //for (int p = 0; p < voxelDataSize; p++) //{ // cout << voxelValue[p] << " "; //} //cout << endl; } // Perform complete voxel rotation in 3D space void Matrix::rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas) { Float4 *Transformed = new Float4[voxelDataSize]; // Final transformed matrix stored here TvoxelValue = new float[voxelDataSize]; // Final voxel value stored here Float4 *TransMat = gen4x4tm(xDist, yDist, zDist, rotateAxis, thetas); // Obtain transformation matrix multiplynxm(TransMat, Coor, Transformed, 4, voxelDataSize); // matrix multiply to get Transformed for (int fin = 0; fin < voxelDataSize; fin++) // rotate voxelsx { int yes = (Transformed[fin].value[2] * numDivZ * numDivZ) + (Transformed[fin].value[1] * numDivY) + Transformed[fin].value[0]; this->TvoxelValue[yes] = voxelValue[fin]; } delete[](Transformed); 
delete[](TransMat); //cout << "Transformed voxel values: " << endl; //for (int p = 0; p < voxelDataSize; p++) //{ // cout << TvoxelValue[p] << " "; //} //cout << endl; } // Saves rotated voxels back into raw file void Matrix::save() { ofstream rawFile; string fName = "phi_grid.raw"; rawFile.open(fName, std::ofstream::binary); if (!rawFile.good()) { cerr << "Unable to open output file for writing" << endl; abort(); } char* phiOut = new char[voxelDataSize]; for (int k = 0; k < voxelDataSize; k++) { //if (GPU) // phiOut[k] = char(phiValGPU[k] * 255); //else phiOut[k] = char(TvoxelValue[k] * 255); } rawFile.write((char*)phiOut, voxelDataSize*sizeof(char)); delete[] phiOut; rawFile.close(); cout << "Voxel File Saved as : " << fName << endl << endl; } class Artificial { public: Float4 *place; int Xs = 3; int Ys = 3; int Zs = 3; int vSize = Xs * Ys * Zs; float *voxValue; float *TransformedVox; void address(); void save(); void rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas); }; void Artificial::address() { this->voxValue = new float[vSize](); // assign voxel value voxValue[0] = 1; voxValue[4] = 1; voxValue[5] = 1; voxValue[7] = 1; voxValue[13] = 1; voxValue[14] = 1; voxValue[16] = 1; voxValue[17] = 1; voxValue[22] = 1; cout << "Initialized voxel values: " << endl; for (int p = 0; p < vSize; p++) { cout << voxValue[p] << " "; } cout << endl; this->place = new Float4[vSize]; for (int i = 0; i < Zs; i++) { for (int j = 0; j < Ys; j++) { for (int k = 0; k < Xs; k++) { int marker = (Zs*Zs*i) + (Ys*j) + k; place[marker].value[0] = k; place[marker].value[1] = j; place[marker].value[2] = i; place[marker].value[3] = 1; } } } cout << "Initial Location Matrix: " << endl; for (int i = 0; i < 4; i++) { for (int j = 0; j < vSize; j++) { cout << place[j].value[i] << " "; } cout << endl; } cout << endl; } void Artificial::rotate(int xDist, int yDist, int zDist, char rotateAxis, int thetas) { Float4 *FinalT = new Float4[4]; // Final transformed matrix stored here 
TransformedVox = new float[vSize]; // Final voxel value stored here Float4 *TMat = gen4x4tm(xDist, yDist, zDist, rotateAxis, thetas); // Obtain transformation matrix multiplynxm(TMat, place, FinalT, 4, vSize); // matrix multiply to get Transformed for (int fin = 0; fin < vSize; fin++) // rotate voxels { int yes = (FinalT[fin].value[2] * Zs * Zs) + (FinalT[fin].value[1] * Ys) + FinalT[fin].value[0]; this->TransformedVox[yes] = voxValue[fin]; } delete[](FinalT); delete[](TMat); cout << "Transformed voxel values: " << endl; for (int p = 0; p < vSize; p++) { cout << TransformedVox[p] << " "; } cout << endl; } void Artificial::save() { ofstream rawFile; string fName = "aiyo.raw"; rawFile.open(fName, std::ofstream::binary); if (!rawFile.good()) { cerr << "Unable to open output file for writing" << endl; abort(); } char* phiOut = new char[vSize]; for (int k = 0; k < vSize; k++) { //if (GPU) // phiOut[k] = outputPhi(phiValGPU[k] * 255); //else //phiOut[k] = char(TransformedVox[k] * 255); phiOut[k] = char(voxValue[k] * 255); } rawFile.write((char*)phiOut, vSize*sizeof(char)); delete[] phiOut; rawFile.close(); cout << "Voxel File Saved as : " << fName << endl << endl; } int main() { //Matrix wow; //wow.read(); //wow.rotate(16, 16, 16, 'y', 90); //wow.save(); Artificial Testz; Testz.address(); Testz.rotate(1, 1, 1, 'y', 90); Testz.save(); return 0; }
e5296e1fcb9dd51aa009bc30ce9010db1731865c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "max_subsampling_layer_updater_schema.h" #include "../neural_network_exception.h" #include "../max_subsampling_layer.h" #include "max_subsampling_layer_updater_hip.cuh" #include <boost/format.hpp> namespace nnforge { namespace cuda { max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema() { } max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema() { } layer_updater_schema_smart_ptr max_subsampling_layer_updater_schema::create_specific() const { return layer_updater_schema_smart_ptr(new max_subsampling_layer_updater_schema()); } const boost::uuids::uuid& max_subsampling_layer_updater_schema::get_uuid() const { return max_subsampling_layer::layer_guid; } layer_updater_cuda_smart_ptr max_subsampling_layer_updater_schema::create_updater_specific( const layer_configuration_specific& input_configuration_specific, const layer_configuration_specific& output_configuration_specific) const { layer_updater_cuda_smart_ptr res; switch (output_configuration_specific.dimension_sizes.size()) { case 1: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<1>()); break; case 2: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<2>()); break; case 3: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<3>()); break; case 4: res = 
layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<4>()); break; default: throw neural_network_exception((boost::format("No CUDA updater for the max subsampling of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str()); } return res; } } }
e5296e1fcb9dd51aa009bc30ce9010db1731865c.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "max_subsampling_layer_updater_schema.h" #include "../neural_network_exception.h" #include "../max_subsampling_layer.h" #include "max_subsampling_layer_updater_cuda.cuh" #include <boost/format.hpp> namespace nnforge { namespace cuda { max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema() { } max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema() { } layer_updater_schema_smart_ptr max_subsampling_layer_updater_schema::create_specific() const { return layer_updater_schema_smart_ptr(new max_subsampling_layer_updater_schema()); } const boost::uuids::uuid& max_subsampling_layer_updater_schema::get_uuid() const { return max_subsampling_layer::layer_guid; } layer_updater_cuda_smart_ptr max_subsampling_layer_updater_schema::create_updater_specific( const layer_configuration_specific& input_configuration_specific, const layer_configuration_specific& output_configuration_specific) const { layer_updater_cuda_smart_ptr res; switch (output_configuration_specific.dimension_sizes.size()) { case 1: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<1>()); break; case 2: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<2>()); break; case 3: res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<3>()); break; case 4: res = layer_updater_cuda_smart_ptr(new 
max_subsampling_layer_updater_cuda<4>()); break; default: throw neural_network_exception((boost::format("No CUDA updater for the max subsampling of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str()); } return res; } } }
b2d90a327521453cf35361e874b7651bad3dbbca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } int size = sizeof(REAL)*NX*NY*NZ; int partsize = sizeof(REAL)*NX*NY*NZ_; REAL **host_A = new REAL*[num]; REAL **host_B = new REAL*[num]; int size_ = NZ_*NY*NX; for (int i = 0; i < num; ++i) { host_A[i] = new REAL[size_]; host_B[i] = new REAL[size_]; //host_A[i] = (REAL*)malloc(partsize); //host_B[i] = (REAL*)malloc(partsize); } REAL* cpu_A = new REAL[NX*NY*NZ]; REAL* result_A = new REAL[NX*NY*NZ]; REAL* cpu_B = new REAL[NX*NY*NZ]; for (int part = 0; part < num; part++) for (int k = 0; k < NZ_; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { host_A[part][k*NY*NX+j*NX+i] = 1.0; host_B[part][k*NY*NX+j*NX+i] = 1.0; } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 0.0; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); float elapsed_time; double flops; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; hipMalloc(&dev_A, partsize); hipMalloc(&dev_B, partsize); hipMemcpy(dev_A, host_A[i], partsize, hipMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); 
//512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline cout << NZ_ << endl; for (int t = 0; t < T; t++){ hipLaunchKernelGGL(( baseline), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (hipGetLastError() != hipSuccess) printf("baseline: wrong!!!\n"); hipMemcpy(host_A[i], dev_A, partsize, hipMemcpyDeviceToHost); hipFree(dev_A); hipFree(dev_B); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, NULL); stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; if (num == 1) { check(cpu_A, host_A[0], NX, NY, NZ); } else { int begin, end; int smallsize = NZ/num * NY * NX; int i=0, z=0, y=0, x=0; for (i = 0; i < num; ++i) { begin = 1; end = NZ_-1; if (i == 0) { begin=0; end=NZ_-2; } else if (i == num-1) { begin=2; end=NZ_; } int index = i*smallsize; for (z = begin; z < end; ++z) for (y = 0; y < NY; ++y) for (x = 0; x < NX; ++x) { result_A[index] = host_A[i][NY*NX*z + y*NX + x]; //assert(abs(host_A[i][NY*NX*z + y*NX + x] - 1.0) < 1e-5); //if (i == 2) // cout << host_A[i][NY*NX_*z + y*NX_ + x] << endl; index++; } } check(cpu_A, result_A, NX, NY, NZ); } //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt hipEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != 
hipSuccess) printf("baseopt: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache hipEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("read-only data cache: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw hipEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("share memory raw: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// hipEventDestroy(start); hipEventDestroy(stop); */ return 0; }
b2d90a327521453cf35361e874b7651bad3dbbca.cu
#include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } int size = sizeof(REAL)*NX*NY*NZ; int partsize = sizeof(REAL)*NX*NY*NZ_; REAL **host_A = new REAL*[num]; REAL **host_B = new REAL*[num]; int size_ = NZ_*NY*NX; for (int i = 0; i < num; ++i) { host_A[i] = new REAL[size_]; host_B[i] = new REAL[size_]; //host_A[i] = (REAL*)malloc(partsize); //host_B[i] = (REAL*)malloc(partsize); } REAL* cpu_A = new REAL[NX*NY*NZ]; REAL* result_A = new REAL[NX*NY*NZ]; REAL* cpu_B = new REAL[NX*NY*NZ]; for (int part = 0; part < num; part++) for (int k = 0; k < NZ_; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { host_A[part][k*NY*NX+j*NX+i] = 1.0; host_B[part][k*NY*NX+j*NX+i] = 1.0; } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 0.0; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); float elapsed_time; double flops; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; cudaMalloc(&dev_A, partsize); cudaMalloc(&dev_B, partsize); cudaMemcpy(dev_A, host_A[i], partsize, cudaMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); 
//512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline cout << NZ_ << endl; for (int t = 0; t < T; t++){ baseline<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (cudaGetLastError() != cudaSuccess) printf("baseline: wrong!!!\n"); cudaMemcpy(host_A[i], dev_A, partsize, cudaMemcpyDeviceToHost); cudaFree(dev_A); cudaFree(dev_B); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, NULL); stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; if (num == 1) { check(cpu_A, host_A[0], NX, NY, NZ); } else { int begin, end; int smallsize = NZ/num * NY * NX; int i=0, z=0, y=0, x=0; for (i = 0; i < num; ++i) { begin = 1; end = NZ_-1; if (i == 0) { begin=0; end=NZ_-2; } else if (i == num-1) { begin=2; end=NZ_; } int index = i*smallsize; for (z = begin; z < end; ++z) for (y = 0; y < NY; ++y) for (x = 0; x < NX; ++x) { result_A[index] = host_A[i][NY*NX*z + y*NX + x]; //assert(abs(host_A[i][NY*NX*z + y*NX + x] - 1.0) < 1e-5); //if (i == 2) // cout << host_A[i][NY*NX_*z + y*NX_ + x] << endl; index++; } } check(cpu_A, result_A, NX, NY, NZ); } //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("baseopt: 
wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("read-only data cache: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("share memory raw: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// cudaEventDestroy(start); cudaEventDestroy(stop); */ return 0; }
64b6bfabc27561365b57b49bc8ba0822658f3fa8.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <hip/hip_runtime.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Aado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Aado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los ndices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... 
__syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Aado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } hipMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); hipMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); hipMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); hipMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, hipMemcpyHostToDevice); 
hipLaunchKernelGGL(( matrixAddPitch) , dim3(b), dim3(t), 0, 0, d_a, d_b, d_c, pitch / sizeof(int)); hipMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, hipMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); hipMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, hipMemcpyHostToDevice); // dst, src, size, hipMemcpyHostToDevice...; hipMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... hipMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); hipLaunchKernelGGL(( traspose), dim3(b), dim3(t), 0, 0, dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); hipMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, hipMemcpyDeviceToHost); // dest, src, size, hipMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
64b6bfabc27561365b57b49bc8ba0822658f3fa8.cu
#include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <cuda.h> #define THREADS 32 #define WIDTH 4 #define HEIGHT 2 // See times between copy and traspose and realize the same operation // with shared memory. // Traspose is slower than copy... // Traspose is need to shared memory to improve the performance... // https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ __global__ void copy(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idy * WIDTH + idx] = src[idy * WIDTH + idx]; // Copio tal cual con los mismos indices facil... :) } __global__ void copy_shared(int *src, int *dst) { __shared__ int mem[THREADS][THREADS + 1]; int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; mem[threadIdx.x][threadIdx.y] = src[idy * WIDTH + idx]; // Añado el valor en la memoria compartida... __syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la memoria // compartida... } __global__ void traspose(int *src, int *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; dest[idx * HEIGHT + idy] = src[idy * WIDTH + idx]; // Cambio el valor de la matriz a la traspuesta // con los índices de acceso a la matriz... } __global__ void traspose_shared(int *src, int *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= WIDTH || idy >= HEIGHT) return; __shared__ int mem[THREADS][THREADS + 1]; mem[threadIdx.x][threadIdx.y] = src[idx * HEIGHT + idy]; // Hago las posiciones traspuestas // en la memoria compartida... 
__syncthreads(); dst[idy * WIDTH + idx] = mem[threadIdx.x][threadIdx.y]; // Añado en su posicion natural el valor de la shared // que tiene el valor de la traspuesta.... } __global__ void matrixAddPitch (int *a, int *b, int*c, int pitch) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx > pitch || idy > HEIGHT) return; c[idy * pitch + idx] = a[idy * pitch + idx] + b[idy * pitch + idx]; } unsigned long get_time() { struct timespec ts; if (clock_gettime(0, &ts) < 0) { fprintf(stderr, "Error calc time... %s\n", strerror(errno)); exit(1); } return ts.tv_sec * 1000000000L + ts.tv_nsec; } void mi_malloc_int(int **i, int size) { *i = (int *) malloc( sizeof(int) * size); if (*i == NULL) { fprintf(stderr, "Error malloc %s\n", strerror(errno)); exit(1); } memset(*i, 0, sizeof(int) * size); } void init(int *h_v) { for (int i = 0; i < HEIGHT; i++) { for (int j = 0; j < WIDTH; ++j) { h_v[i * WIDTH + j] = i * WIDTH + j; } } } void print_matrix(const int *matrix, const int w, const int h) { fprintf(stdout, "%s\n", "Print matrix..."); for (int i = 0; i < h; i++) { for (int j = 0; j < w; ++j) { fprintf(stdout, "%5d", matrix[i * w + j]); } fprintf(stdout, "%s\n", ""); } fprintf(stdout, "%s\n", ""); } void addPitch() { int n = WIDTH * HEIGHT; dim3 t (16, 16); dim3 b ( (WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = sizeof(int) * n; size_t pitch; h_a = (int *) malloc (size); h_b = (int *) malloc (size); h_c = (int *) malloc (size); for (int i = 0; i < n; i++) { h_a[i] = i; h_b[i] = i; } cudaMallocPitch(&d_a, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_b, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMallocPitch(&d_c, &pitch, WIDTH * sizeof(int), HEIGHT); cudaMemcpy2D (d_a, pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); cudaMemcpy2D (d_b, pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice); 
matrixAddPitch <<<b, t>>> (d_a, d_b, d_c, pitch / sizeof(int)); cudaMemcpy2D (h_c, WIDTH * sizeof(int), d_c, pitch, WIDTH * sizeof(int), HEIGHT, cudaMemcpyDeviceToHost); print_matrix(h_c, HEIGHT, WIDTH); free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } void traspose() { int *matrix = NULL; int *dev_matrix = NULL; int *dev_traspose = NULL; mi_malloc_int(&matrix, WIDTH * HEIGHT); init(matrix); print_matrix(matrix, WIDTH, HEIGHT); cudaMalloc(&dev_matrix, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemcpy(dev_matrix, matrix, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyHostToDevice); // dst, src, size, cudaMemcpyHostToDevice...; cudaMalloc(&dev_traspose, sizeof(int) * WIDTH * HEIGHT); // &dst, size... cudaMemset(dev_traspose, 0, sizeof(int) * WIDTH * HEIGHT); // dst, value byte 0, size... dim3 t(THREADS, THREADS); dim3 b((WIDTH - 1) / t.x + 1, (HEIGHT - 1) / t.y + 1); // ... START PARARELL CODE ... unsigned long now = get_time(); traspose<<<b, t>>>(dev_matrix, dev_traspose); // Call kernel << b , t >> (a , b); cudaMemcpy(matrix, dev_traspose, sizeof(int) * WIDTH * HEIGHT, cudaMemcpyDeviceToHost); // dest, src, size, cudaMemcpyDeviceToHost; fprintf(stdout, "Time : %lf ms\n", (get_time() - now) / 1000000.0f); // ... END PARARELL CODE ... print_matrix(matrix, HEIGHT, WIDTH); fprintf(stdout, "Num Blocks (x:%d y:%d)\n", b.x, b.y); } int main(int argc, char const *argv[]) { traspose(); return 0; }
04850f6f1ee83df7dc918d05503f2e4a2a231516.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <string.h> #include "ising.h" // CUDA Kernel __global__ void computeMoment(int8_t *readArr, int8_t *writeArr, float *weightArr, int n, int tileSize){ int row_init = blockIdx.x*blockDim.x + threadIdx.x; int col_init = blockIdx.y*blockDim.y + threadIdx.y; // Assign each thread a tileSizeXtileSize tile for(int ii=0; ii<tileSize; ++ii){ for (int jj=0; jj<tileSize; ++jj){ int row = row_init + ii*tileSize; int col = col_init + jj*tileSize; // If coordinates are between boundaries // update the write array accordingly if(row < n && col < n){ float influence = 0.0f; for (int i=-2; i<3; i++) { for (int j=-2; j<3; j++) { //add extra n so that modulo behaves like mathematics modulo //that is return only positive values int y = (row+i+n)%n; int x = (col+j+n)%n; influence += weightArr[i*5 + j]*readArr[y*n + x]; } } writeArr[row*n + col] = readArr[row*n + col]; if (influence<-diff) writeArr[row*n + col] = -1; else if (influence>diff) writeArr[row*n + col] = 1; __syncthreads(); } } } } void ising(int8_t *G, float *w, int k, int n) { // Allocate memory for the 3 arrays with hipMallocManaged() // because they will be used inside the kernel // The return err values are for debugging only int8_t *readArr, *writeArr; hipError_t err1 = hipMallocManaged(&readArr, n*n*sizeof(int8_t)); hipError_t err2 = hipMallocManaged(&writeArr,n*n*sizeof(int8_t)); float *weightArr_d; hipError_t er3 = hipMallocManaged(&weightArr_d, 5*5*sizeof(float)); // Copy the contents of input arrays inside // the ones we will use inside kernel memcpy(readArr, G, n*n*sizeof(int8_t)); memcpy(weightArr_d, w, 5*5*sizeof(float)); //set valid indexes to [-2..2][-2..2] weightArr_d = &weightArr_d[2*5 + 2]; weightArr_d[0] = 0.0; // Define the thread tile size, that is the size of the block of // moments a single thread will calculate. 
Set it to 5x5 int tileSize = 5; for (int i=1; i<=k; i++) { // Create blocks of size 32x32 threads per block // The number of blocks will adjust to fit the input n dim3 dimBlock(32, 32); int gridSz = (n + 32*tileSize)/ 32*tileSize; dim3 dimGrid(gridSz, gridSz); // Run the kernel in GPU hipLaunchKernelGGL(( computeMoment), dim3(dimGrid), dim3(dimBlock), 0, 0, readArr, writeArr, weightArr_d, n, tileSize); // Uncomment below to check for launch errors //printf("%s\n", hipGetErrorString(hipGetLastError())); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Swap read and write arrays int8_t *temp = readArr; readArr = writeArr; writeArr = temp; } //The final result now is in readArr. Copy the contents // in array G memcpy(G, readArr, n*n*sizeof(int8_t)); hipFree( readArr ); hipFree( writeArr ); hipFree( weightArr_d ); }
04850f6f1ee83df7dc918d05503f2e4a2a231516.cu
#include <stdlib.h> #include <string.h> #include "ising.h" // CUDA Kernel __global__ void computeMoment(int8_t *readArr, int8_t *writeArr, float *weightArr, int n, int tileSize){ int row_init = blockIdx.x*blockDim.x + threadIdx.x; int col_init = blockIdx.y*blockDim.y + threadIdx.y; // Assign each thread a tileSizeXtileSize tile for(int ii=0; ii<tileSize; ++ii){ for (int jj=0; jj<tileSize; ++jj){ int row = row_init + ii*tileSize; int col = col_init + jj*tileSize; // If coordinates are between boundaries // update the write array accordingly if(row < n && col < n){ float influence = 0.0f; for (int i=-2; i<3; i++) { for (int j=-2; j<3; j++) { //add extra n so that modulo behaves like mathematics modulo //that is return only positive values int y = (row+i+n)%n; int x = (col+j+n)%n; influence += weightArr[i*5 + j]*readArr[y*n + x]; } } writeArr[row*n + col] = readArr[row*n + col]; if (influence<-diff) writeArr[row*n + col] = -1; else if (influence>diff) writeArr[row*n + col] = 1; __syncthreads(); } } } } void ising(int8_t *G, float *w, int k, int n) { // Allocate memory for the 3 arrays with cudaMallocManaged() // because they will be used inside the kernel // The return err values are for debugging only int8_t *readArr, *writeArr; cudaError_t err1 = cudaMallocManaged(&readArr, n*n*sizeof(int8_t)); cudaError_t err2 = cudaMallocManaged(&writeArr,n*n*sizeof(int8_t)); float *weightArr_d; cudaError_t er3 = cudaMallocManaged(&weightArr_d, 5*5*sizeof(float)); // Copy the contents of input arrays inside // the ones we will use inside kernel memcpy(readArr, G, n*n*sizeof(int8_t)); memcpy(weightArr_d, w, 5*5*sizeof(float)); //set valid indexes to [-2..2][-2..2] weightArr_d = &weightArr_d[2*5 + 2]; weightArr_d[0] = 0.0; // Define the thread tile size, that is the size of the block of // moments a single thread will calculate. 
Set it to 5x5 int tileSize = 5; for (int i=1; i<=k; i++) { // Create blocks of size 32x32 threads per block // The number of blocks will adjust to fit the input n dim3 dimBlock(32, 32); int gridSz = (n + 32*tileSize)/ 32*tileSize; dim3 dimGrid(gridSz, gridSz); // Run the kernel in GPU computeMoment<<<dimGrid, dimBlock>>> (readArr, writeArr, weightArr_d, n, tileSize); // Uncomment below to check for launch errors //printf("%s\n", cudaGetErrorString(cudaGetLastError())); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Swap read and write arrays int8_t *temp = readArr; readArr = writeArr; writeArr = temp; } //The final result now is in readArr. Copy the contents // in array G memcpy(G, readArr, n*n*sizeof(int8_t)); cudaFree( readArr ); cudaFree( writeArr ); cudaFree( weightArr_d ); }
64da6baf9079d508a7b9da0fd0e08e766ed68103.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_vector.h> #include <thrust/pair.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include "Utilities.cuh" #define DEBUG /******************/ /* LINSPACE - GPU */ /******************/ template <class T> T * d_linspace(const T a, const T b, const unsigned int N) { T *out_array; gpuErrchk(hipMalloc((void**)&out_array, N * sizeof(T))); T Dx = (b - a) / (T)(N - 1); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::transform(thrust::make_counting_iterator(a / Dx), thrust::make_counting_iterator((b + static_cast<T>(1)) / Dx), thrust::make_constant_iterator(Dx), d, thrust::multiplies<T>()); gpuErrchk(hipMemcpy(&out_array[N - 1], &b, sizeof(T), hipMemcpyHostToDevice)); return out_array; } template float * d_linspace<float>(const float a, const float b, const unsigned int N); template double * d_linspace<double>(const double a, const double b, const unsigned int N); /******************/ /* LINSPACE - CPU */ /******************/ template <class T> T * h_linspace(const T a, const T b, const unsigned int N) { T *out_array = (T *)malloc(N * sizeof(T)); T Dx = (b - a) / (T)(N - 1); //thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); //thrust::transform(thrust::host, thrust::make_counting_iterator(a/Dx), thrust::make_counting_iterator((b+static_cast<T>(1))/Dx), thrust::make_constant_iterator(Dx), d, thrust::multiplies<T>()); //memcpy(&out_array[N - 1], &b, sizeof(T)); T temp = a / Dx; for (int i = 0; i < N; i++) out_array[i] = (temp + i) * Dx; return out_array; } template float * h_linspace<float>(const float a, const float b, const unsigned int N); template double * h_linspace<double>(const double a, const double b, const unsigned int N); /*******************/ /* MESHGRID KERNEL */ /*******************/ template <class T> __global__ void meshgrid_kernel(const T * __restrict__ x, const unsigned int Nx, const T * 
__restrict__ y, const unsigned int Ny, T * __restrict__ X, T * __restrict__ Y) { unsigned int tidx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tidy = blockIdx.y * blockDim.y + threadIdx.y; if ((tidx < Nx) && (tidy < Ny)) { X[tidy * Nx + tidx] = x[tidx]; Y[tidy * Nx + tidx] = y[tidy]; } } /******************/ /* MESHGRID - GPU */ /******************/ #define BLOCKSIZE_MESHGRID_X 16 #define BLOCKSIZE_MESHGRID_Y 16 template <class T> thrust::pair<T *, T *> d_meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) { T *X; gpuErrchk(hipMalloc((void**)&X, Nx * Ny * sizeof(T))); T *Y; gpuErrchk(hipMalloc((void**)&Y, Nx * Ny * sizeof(T))); dim3 BlockSize(BLOCKSIZE_MESHGRID_X, BLOCKSIZE_MESHGRID_Y); dim3 GridSize(iDivUp(Nx, BLOCKSIZE_MESHGRID_X), iDivUp(Ny, BLOCKSIZE_MESHGRID_Y)); meshgrid_kernel << <GridSize, BlockSize >> >(x, Nx, y, Ny, X, Y); #ifdef DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif return thrust::make_pair(X, Y); } template thrust::pair<float *, float *> d_meshgrid<float>(const float *, const unsigned int, const float *, const unsigned int); template thrust::pair<double *, double *> d_meshgrid<double>(const double *, const unsigned int, const double *, const unsigned int); /******************/ /* MESHGRID - CPU */ /******************/ template <class T> thrust::pair<T *, T *> h_meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) { T *X = (T *)malloc(Nx * Ny * sizeof(T)); T *Y = (T *)malloc(Nx * Ny * sizeof(T)); for (int j = 0; j < Ny; j++) for (int i = 0; i < Nx; i++) { X[j * Nx + i] = x[i]; Y[j * Nx + i] = y[j]; } return thrust::make_pair(X, Y); } template thrust::pair<float *, float *> h_meshgrid<float>(const float *, const unsigned int, const float *, const unsigned int); template thrust::pair<double *, double *> h_meshgrid<double>(const double *, const unsigned int, const double *, const unsigned int); /***************/ /* COLON - GPU */ /***************/ 
template <class T> T * d_colon(const T a, const T step, const T b) { int N = (int)((b - a) / step) + 1; T *out_array; gpuErrchk(hipMalloc((void**)&out_array, N * sizeof(T))); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::sequence(d, d + N, a, step); return out_array; } template float * d_colon<float>(const float a, const float step, const float b); template double * d_colon<double>(const double a, const double step, const double b); /***************/ /* COLON - CPU */ /***************/ template <class T> T * h_colon(const T a, const T step, const T b) { int N = (int)((b - a) / step) + 1; T *out_array = (T *)malloc(N * sizeof(T)); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::sequence(thrust::host, d, d + N, a, step); return out_array; } template float * h_colon<float>(const float a, const float step, const float b); template double * h_colon<double>(const double a, const double step, const double b); /*****************************/ /* GENERATE SYMMETRIC POINTS */ /*****************************/ template<class T> T * generateSymmetricPoints(const T step, const T b) { const int N = (int)(b / step) + 1; T *d_u; gpuErrchk(hipMalloc(&d_u, (2 * N - 1) * sizeof(T))); T *d_u_temp = d_colon(static_cast<T>(0), step, b); gpuErrchk(hipMemcpy(d_u + N - 1, d_u_temp, N * sizeof(T), hipMemcpyDeviceToDevice)); reverseArray(d_u_temp + 1, d_u, N - 1, static_cast<T>(-1)); gpuErrchk(hipFree(d_u_temp)); return d_u; } template float * generateSymmetricPoints<float>(const float, const float); template double * generateSymmetricPoints<double>(const double, const double);
64da6baf9079d508a7b9da0fd0e08e766ed68103.cu
#include <thrust/device_vector.h> #include <thrust/pair.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include "Utilities.cuh" #define DEBUG /******************/ /* LINSPACE - GPU */ /******************/ template <class T> T * d_linspace(const T a, const T b, const unsigned int N) { T *out_array; gpuErrchk(cudaMalloc((void**)&out_array, N * sizeof(T))); T Dx = (b - a) / (T)(N - 1); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::transform(thrust::make_counting_iterator(a / Dx), thrust::make_counting_iterator((b + static_cast<T>(1)) / Dx), thrust::make_constant_iterator(Dx), d, thrust::multiplies<T>()); gpuErrchk(cudaMemcpy(&out_array[N - 1], &b, sizeof(T), cudaMemcpyHostToDevice)); return out_array; } template float * d_linspace<float>(const float a, const float b, const unsigned int N); template double * d_linspace<double>(const double a, const double b, const unsigned int N); /******************/ /* LINSPACE - CPU */ /******************/ template <class T> T * h_linspace(const T a, const T b, const unsigned int N) { T *out_array = (T *)malloc(N * sizeof(T)); T Dx = (b - a) / (T)(N - 1); //thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); //thrust::transform(thrust::host, thrust::make_counting_iterator(a/Dx), thrust::make_counting_iterator((b+static_cast<T>(1))/Dx), thrust::make_constant_iterator(Dx), d, thrust::multiplies<T>()); //memcpy(&out_array[N - 1], &b, sizeof(T)); T temp = a / Dx; for (int i = 0; i < N; i++) out_array[i] = (temp + i) * Dx; return out_array; } template float * h_linspace<float>(const float a, const float b, const unsigned int N); template double * h_linspace<double>(const double a, const double b, const unsigned int N); /*******************/ /* MESHGRID KERNEL */ /*******************/ template <class T> __global__ void meshgrid_kernel(const T * __restrict__ x, const unsigned int Nx, const T * __restrict__ y, const unsigned int Ny, T * __restrict__ X, T * __restrict__ Y) { 
unsigned int tidx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tidy = blockIdx.y * blockDim.y + threadIdx.y; if ((tidx < Nx) && (tidy < Ny)) { X[tidy * Nx + tidx] = x[tidx]; Y[tidy * Nx + tidx] = y[tidy]; } } /******************/ /* MESHGRID - GPU */ /******************/ #define BLOCKSIZE_MESHGRID_X 16 #define BLOCKSIZE_MESHGRID_Y 16 template <class T> thrust::pair<T *, T *> d_meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) { T *X; gpuErrchk(cudaMalloc((void**)&X, Nx * Ny * sizeof(T))); T *Y; gpuErrchk(cudaMalloc((void**)&Y, Nx * Ny * sizeof(T))); dim3 BlockSize(BLOCKSIZE_MESHGRID_X, BLOCKSIZE_MESHGRID_Y); dim3 GridSize(iDivUp(Nx, BLOCKSIZE_MESHGRID_X), iDivUp(Ny, BLOCKSIZE_MESHGRID_Y)); meshgrid_kernel << <GridSize, BlockSize >> >(x, Nx, y, Ny, X, Y); #ifdef DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif return thrust::make_pair(X, Y); } template thrust::pair<float *, float *> d_meshgrid<float>(const float *, const unsigned int, const float *, const unsigned int); template thrust::pair<double *, double *> d_meshgrid<double>(const double *, const unsigned int, const double *, const unsigned int); /******************/ /* MESHGRID - CPU */ /******************/ template <class T> thrust::pair<T *, T *> h_meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) { T *X = (T *)malloc(Nx * Ny * sizeof(T)); T *Y = (T *)malloc(Nx * Ny * sizeof(T)); for (int j = 0; j < Ny; j++) for (int i = 0; i < Nx; i++) { X[j * Nx + i] = x[i]; Y[j * Nx + i] = y[j]; } return thrust::make_pair(X, Y); } template thrust::pair<float *, float *> h_meshgrid<float>(const float *, const unsigned int, const float *, const unsigned int); template thrust::pair<double *, double *> h_meshgrid<double>(const double *, const unsigned int, const double *, const unsigned int); /***************/ /* COLON - GPU */ /***************/ template <class T> T * d_colon(const T a, const T step, const T b) { int N = 
(int)((b - a) / step) + 1; T *out_array; gpuErrchk(cudaMalloc((void**)&out_array, N * sizeof(T))); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::sequence(d, d + N, a, step); return out_array; } template float * d_colon<float>(const float a, const float step, const float b); template double * d_colon<double>(const double a, const double step, const double b); /***************/ /* COLON - CPU */ /***************/ template <class T> T * h_colon(const T a, const T step, const T b) { int N = (int)((b - a) / step) + 1; T *out_array = (T *)malloc(N * sizeof(T)); thrust::device_ptr<T> d = thrust::device_pointer_cast(out_array); thrust::sequence(thrust::host, d, d + N, a, step); return out_array; } template float * h_colon<float>(const float a, const float step, const float b); template double * h_colon<double>(const double a, const double step, const double b); /*****************************/ /* GENERATE SYMMETRIC POINTS */ /*****************************/ template<class T> T * generateSymmetricPoints(const T step, const T b) { const int N = (int)(b / step) + 1; T *d_u; gpuErrchk(cudaMalloc(&d_u, (2 * N - 1) * sizeof(T))); T *d_u_temp = d_colon(static_cast<T>(0), step, b); gpuErrchk(cudaMemcpy(d_u + N - 1, d_u_temp, N * sizeof(T), cudaMemcpyDeviceToDevice)); reverseArray(d_u_temp + 1, d_u, N - 1, static_cast<T>(-1)); gpuErrchk(cudaFree(d_u_temp)); return d_u; } template float * generateSymmetricPoints<float>(const float, const float); template double * generateSymmetricPoints<double>(const double, const double);
28a18391e853fe512af9010db39f208a5e1faeae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <random> #include "kernels_hip.cuh" int main() { unsigned int n = 16; int *h_x; int *d_x; int *h_root; int *d_root; int *h_child; int *d_child; // allocate memory h_x = (int*)malloc(n*sizeof(int)); h_root = (int*)malloc(sizeof(int)); h_child = (int*)malloc(2*(n+1)*sizeof(int)); hipMalloc((void**)&d_root, sizeof(int)); hipMalloc((void**)&d_x, n*sizeof(int)); hipMalloc((void**)&d_child, 2*(n+1)*sizeof(int)); hipMemset(d_child, -1, 2*(n+1)*sizeof(int)); // fill h_temp and h_x arrays for(unsigned int i=0;i<n;i++){ h_x[i] = i+1; } for(unsigned int i=0;i<n;i++){ unsigned int j = random() % (n-i); int temp = h_x[i]; h_x[i] = h_x[i+j]; h_x[i+j] = temp; } *h_root = h_x[0]; for(unsigned int i=0;i<n;i++){ std::cout<<h_x[i]<<" "; } std::cout<<""<<std::endl; // copy data to device hipMemcpy(d_root, h_root, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_x, h_x, n*sizeof(int), hipMemcpyHostToDevice); // kernel call dim3 gridSize = 4; dim3 blockSize = 4; hipLaunchKernelGGL(( build_binary_tree), dim3(gridSize), dim3(blockSize), 0, 0, d_x, d_child, d_root, n); // copy from device back to host hipMemcpy(h_child, d_child, 2*(n+1)*sizeof(int), hipMemcpyDeviceToHost); // print tree for(unsigned int i=0;i<2*(n+1);i++){ std::cout<<h_child[i]<<" "; } std::cout<<""<<std::endl; // free memory free(h_x); free(h_root); free(h_child); hipFree(d_x); hipFree(d_root); hipFree(d_child); }
28a18391e853fe512af9010db39f208a5e1faeae.cu
#include <iostream> #include <time.h> #include <random> #include "kernels.cuh" int main() { unsigned int n = 16; int *h_x; int *d_x; int *h_root; int *d_root; int *h_child; int *d_child; // allocate memory h_x = (int*)malloc(n*sizeof(int)); h_root = (int*)malloc(sizeof(int)); h_child = (int*)malloc(2*(n+1)*sizeof(int)); cudaMalloc((void**)&d_root, sizeof(int)); cudaMalloc((void**)&d_x, n*sizeof(int)); cudaMalloc((void**)&d_child, 2*(n+1)*sizeof(int)); cudaMemset(d_child, -1, 2*(n+1)*sizeof(int)); // fill h_temp and h_x arrays for(unsigned int i=0;i<n;i++){ h_x[i] = i+1; } for(unsigned int i=0;i<n;i++){ unsigned int j = random() % (n-i); int temp = h_x[i]; h_x[i] = h_x[i+j]; h_x[i+j] = temp; } *h_root = h_x[0]; for(unsigned int i=0;i<n;i++){ std::cout<<h_x[i]<<" "; } std::cout<<""<<std::endl; // copy data to device cudaMemcpy(d_root, h_root, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_x, h_x, n*sizeof(int), cudaMemcpyHostToDevice); // kernel call dim3 gridSize = 4; dim3 blockSize = 4; build_binary_tree<<< gridSize, blockSize>>>(d_x, d_child, d_root, n); // copy from device back to host cudaMemcpy(h_child, d_child, 2*(n+1)*sizeof(int), cudaMemcpyDeviceToHost); // print tree for(unsigned int i=0;i<2*(n+1);i++){ std::cout<<h_child[i]<<" "; } std::cout<<""<<std::endl; // free memory free(h_x); free(h_root); free(h_child); cudaFree(d_x); cudaFree(d_root); cudaFree(d_child); }
2495cf9d5d0c6f77a5f2d0a1e96f2d684e800c47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlange.cu normal z -> d, Tue Sep 2 12:38:15 2014 @author Mark Gates */ #include "common_magma.h" /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */ extern "C" __global__ void dlange_inf_kernel( int m, int n, const double *A, int lda, double *dwork ) { int i = blockIdx.x*64 + threadIdx.x; double Cb[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const double *Aend = A + lda*n; double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { Cb[0] += fabs( rA[0] ); rA[0] = A[0]; Cb[1] += fabs( rA[1] ); rA[1] = A[lda]; Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda]; Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } Cb[0] += fabs( rA[0] ); Cb[1] += fabs( rA[1] ); Cb[2] += fabs( rA[2] ); Cb[3] += fabs( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: Cb[0] += fabs( A[0] ); break; case 2: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); break; case 3: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); Cb[2] += fabs( A[2*lda] ); break; } /* compute final result */ dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3]; } } /** Purpose ------- DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. 
Description ----------- DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported ( ( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm CHARACTER*1 Specifies the value to be returned in DLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. @param[in] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) The m by n matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(M,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. @ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlange( magma_norm_t norm, magma_int_t m, magma_int_t n, const double *A, magma_int_t lda, double *dwork ) { magma_int_t info = 0; if ( norm != MagmaInfNorm ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; dim3 threads( 64 ); dim3 grid( (m-1)/64 + 1 ); hipLaunchKernelGGL(( dlange_inf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda, dwork ); int i = magma_idamax( m, dwork, 1 ) - 1; double res; hipMemcpy( &res, &dwork[i], sizeof(double), hipMemcpyDeviceToHost ); return res; }
2495cf9d5d0c6f77a5f2d0a1e96f2d684e800c47.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlange.cu normal z -> d, Tue Sep 2 12:38:15 2014 @author Mark Gates */ #include "common_magma.h" /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */ extern "C" __global__ void dlange_inf_kernel( int m, int n, const double *A, int lda, double *dwork ) { int i = blockIdx.x*64 + threadIdx.x; double Cb[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const double *Aend = A + lda*n; double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { Cb[0] += fabs( rA[0] ); rA[0] = A[0]; Cb[1] += fabs( rA[1] ); rA[1] = A[lda]; Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda]; Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } Cb[0] += fabs( rA[0] ); Cb[1] += fabs( rA[1] ); Cb[2] += fabs( rA[2] ); Cb[3] += fabs( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: Cb[0] += fabs( A[0] ); break; case 2: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); break; case 3: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); Cb[2] += fabs( A[2*lda] ); break; } /* compute final result */ dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3]; } } /** Purpose ------- DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. 
Description ----------- DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported ( ( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm CHARACTER*1 Specifies the value to be returned in DLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. @param[in] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) The m by n matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(M,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. @ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlange( magma_norm_t norm, magma_int_t m, magma_int_t n, const double *A, magma_int_t lda, double *dwork ) { magma_int_t info = 0; if ( norm != MagmaInfNorm ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; dim3 threads( 64 ); dim3 grid( (m-1)/64 + 1 ); dlange_inf_kernel<<< grid, threads, 0, magma_stream >>>( m, n, A, lda, dwork ); int i = magma_idamax( m, dwork, 1 ) - 1; double res; cudaMemcpy( &res, &dwork[i], sizeof(double), cudaMemcpyDeviceToHost ); return res; }
9a1c29e690a9cc9568e83e75d842b01fa2b5ce69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void Final_Hello() {printf("Final_Hello");} int main() {hipLaunchKernelGGL(( Final_Hello), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); }
9a1c29e690a9cc9568e83e75d842b01fa2b5ce69.cu
#include <stdio.h> __global__ void Final_Hello() {printf("Final_Hello");} int main() { Final_Hello<<<1,1>>>(); cudaDeviceSynchronize(); }
97759542f678f1496ab2efe8cc7e594c56c9ae21.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BICUBICTEXTURE_CU_ #define _BICUBICTEXTURE_CU_ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <helper_math.h> // includes, cuda #include <helper_cuda.h> typedef unsigned int uint; typedef unsigned char uchar; #include "bicubicTexture_kernel.cuh" hipArray *d_imageArray = 0; extern "C" void initTexture(int imageWidth, int imageHeight, uchar *h_data) { // allocate array and copy image data hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned); checkCudaErrors(hipMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight)); uint size = imageWidth * imageHeight * sizeof(uchar); checkCudaErrors(hipMemcpyToArray(d_imageArray, 0, 0, h_data, size, hipMemcpyHostToDevice)); free(h_data); // set texture parameters tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; tex.filterMode = hipFilterModeLinear; tex.normalized = false; // access with integer texture coordinates getLastCudaError("initTexture"); // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(tex, d_imageArray)); // bind same array to 2nd texture reference with point sampling tex2.addressMode[0] = hipAddressModeClamp; tex2.addressMode[1] = hipAddressModeClamp; tex2.filterMode = hipFilterModePoint; tex2.normalized = false; // access with integer texture coordinates checkCudaErrors(hipBindTextureToArray(tex2, d_imageArray)); } extern "C" void freeTexture() { checkCudaErrors(hipFreeArray(d_imageArray)); } // render image using CUDA extern "C" void 
render(int width, int height, float tx, float ty, float scale, float cx, float cy, dim3 blockSize, dim3 gridSize, int filter_mode, uchar4 *output) { // call CUDA kernel, writing results to PBO memory switch (filter_mode) { case MODE_NEAREST: tex.filterMode = hipFilterModePoint; d_render << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_BILINEAR: tex.filterMode = hipFilterModeLinear; d_render << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_BICUBIC: tex.filterMode = hipFilterModePoint; d_renderBicubic << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_FAST_BICUBIC: tex.filterMode = hipFilterModeLinear; d_renderFastBicubic << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_CATROM: tex.filterMode = hipFilterModePoint; d_renderCatRom << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; } getLastCudaError("kernel failed"); } #endif
97759542f678f1496ab2efe8cc7e594c56c9ae21.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef _BICUBICTEXTURE_CU_ #define _BICUBICTEXTURE_CU_ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <helper_math.h> // includes, cuda #include <helper_cuda.h> typedef unsigned int uint; typedef unsigned char uchar; #include "bicubicTexture_kernel.cuh" cudaArray *d_imageArray = 0; extern "C" void initTexture(int imageWidth, int imageHeight, uchar *h_data) { // allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned); checkCudaErrors(cudaMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight)); uint size = imageWidth * imageHeight * sizeof(uchar); checkCudaErrors(cudaMemcpyToArray(d_imageArray, 0, 0, h_data, size, cudaMemcpyHostToDevice)); free(h_data); // set texture parameters tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModeLinear; tex.normalized = false; // access with integer texture coordinates getLastCudaError("initTexture"); // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(tex, d_imageArray)); // bind same array to 2nd texture reference with point sampling tex2.addressMode[0] = cudaAddressModeClamp; tex2.addressMode[1] = cudaAddressModeClamp; tex2.filterMode = cudaFilterModePoint; tex2.normalized = false; // access with integer texture coordinates checkCudaErrors(cudaBindTextureToArray(tex2, d_imageArray)); } extern "C" void freeTexture() { checkCudaErrors(cudaFreeArray(d_imageArray)); } // render image using CUDA extern "C" void render(int width, int height, float tx, float 
ty, float scale, float cx, float cy, dim3 blockSize, dim3 gridSize, int filter_mode, uchar4 *output) { // call CUDA kernel, writing results to PBO memory switch (filter_mode) { case MODE_NEAREST: tex.filterMode = cudaFilterModePoint; d_render << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_BILINEAR: tex.filterMode = cudaFilterModeLinear; d_render << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_BICUBIC: tex.filterMode = cudaFilterModePoint; d_renderBicubic << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_FAST_BICUBIC: tex.filterMode = cudaFilterModeLinear; d_renderFastBicubic << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; case MODE_CATROM: tex.filterMode = cudaFilterModePoint; d_renderCatRom << <gridSize, blockSize >> > (output, width, height, tx, ty, scale, cx, cy); break; } getLastCudaError("kernel failed"); } #endif
70ce9bb52f2684aa94c791670ff17ecff631120e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_exp.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_exp), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_exp), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_exp), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
70ce9bb52f2684aa94c791670ff17ecff631120e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_exp.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_exp<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_exp<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_exp<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5321126b5c8c586821c193baf26897caecf6cc71.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/im2col.cuh> #include <ATen/native/im2col_shape_check.h> namespace at { namespace native { namespace { void col2im_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU("col2im_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t output_height = output_size[0]; int64_t output_width = output_size[1]; int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; col2im_shape_check( input_, Tensor(), output_height, output_width, kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 2) { // Force batch batched_input = false; input.resize_({1, 
input.size(0), input.size(1)}); } int64_t batch_size = input.size(0); int64_t n_input_plane = input.size(1); int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height); output.resize_({batch_size, n_output_plane, output_height, output_width}); output.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "col2im_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; Tensor input_n; Tensor output_n; int64_t height_col = (output_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t width_col = (output_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); col2im<scalar_t, accscalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data_ptr<scalar_t>(), n_output_plane, output_height, output_width, height_col, width_col, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_height, output_width}); } }); } void col2im_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { // im2col_out_cuda checks size of kernel_size, dilation, padding and stride at::native::im2col_out_cuda( grad_output, kernel_size, dilation, padding, stride, grad_input); } } // namespace Tensor& col2im_out_cuda(const Tensor& input, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { col2im_out_cuda_template( output, input, output_size, kernel_size, dilation, padding, stride); return output; } Tensor col2im_cuda( const Tensor& input, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, 
IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); col2im_out_cuda_template( output, input, output_size, kernel_size, dilation, padding, stride); return output; } Tensor& col2im_backward_out_cuda(const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& grad_input) { col2im_backward_out_cuda_template( grad_input, grad_output, kernel_size, dilation, padding, stride); return grad_input; } Tensor col2im_backward_cuda( const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); col2im_backward_out_cuda_template( grad_input, grad_output, kernel_size, dilation, padding, stride); return grad_input; } } // namespace native } // namespace at
5321126b5c8c586821c193baf26897caecf6cc71.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/im2col.cuh> #include <ATen/native/im2col_shape_check.h> namespace at { namespace native { namespace { void col2im_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU("col2im_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t output_height = output_size[0]; int64_t output_width = output_size[1]; int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; col2im_shape_check( input_, Tensor(), output_height, output_width, kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 2) { // Force batch batched_input = false; input.resize_({1, input.size(0), input.size(1)}); } int64_t batch_size = 
input.size(0); int64_t n_input_plane = input.size(1); int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height); output.resize_({batch_size, n_output_plane, output_height, output_width}); output.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "col2im_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; Tensor input_n; Tensor output_n; int64_t height_col = (output_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t width_col = (output_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); col2im<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), input_n.data_ptr<scalar_t>(), n_output_plane, output_height, output_width, height_col, width_col, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_height, output_width}); } }); } void col2im_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { // im2col_out_cuda checks size of kernel_size, dilation, padding and stride at::native::im2col_out_cuda( grad_output, kernel_size, dilation, padding, stride, grad_input); } } // namespace Tensor& col2im_out_cuda(const Tensor& input, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { col2im_out_cuda_template( output, input, output_size, kernel_size, dilation, padding, stride); return output; } Tensor col2im_cuda( const Tensor& input, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, 
LEGACY_CONTIGUOUS_MEMORY_FORMAT); col2im_out_cuda_template( output, input, output_size, kernel_size, dilation, padding, stride); return output; } Tensor& col2im_backward_out_cuda(const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& grad_input) { col2im_backward_out_cuda_template( grad_input, grad_output, kernel_size, dilation, padding, stride); return grad_input; } Tensor col2im_backward_cuda( const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); col2im_backward_out_cuda_template( grad_input, grad_output, kernel_size, dilation, padding, stride); return grad_input; } } // namespace native } // namespace at
549771938f5ea581f1fe903ec9ebbbc6efc0cab1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgeadd.cu, normal z -> c, Thu Oct 8 23:05:31 2020 @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to claset. */ __global__ void cgeadd_full( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex *dA, int ldda, magmaFloatComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } } } /***************************************************************************//** Purpose ------- ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] alpha COMPLEX The scalar alpha. @param[in] dA COMPLEX array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in,out] dB COMPLEX array, dimension (LDDB,N) The m by n matrix dB. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_geadd *******************************************************************************/ extern "C" void magmablas_cgeadd( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); hipLaunchKernelGGL(( cgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, dA, ldda, dB, lddb ); }
549771938f5ea581f1fe903ec9ebbbc6efc0cab1.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgeadd.cu, normal z -> c, Thu Oct 8 23:05:31 2020 @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to claset. */ __global__ void cgeadd_full( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex *dA, int ldda, magmaFloatComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb]; } } } } /***************************************************************************//** Purpose ------- ZGEADD adds two matrices, dB = alpha*dA + dB. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] alpha COMPLEX The scalar alpha. @param[in] dA COMPLEX array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in,out] dB COMPLEX array, dimension (LDDB,N) The m by n matrix dB. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_geadd *******************************************************************************/ extern "C" void magmablas_cgeadd( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); cgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, alpha, dA, ldda, dB, lddb ); }
c377bdfe7a36ee3fa4f1574019a6d7e72fa16282.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <assert.h> extern "C" { #include "blas.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void deconv_transpose_weights_kernels(float* weights, int spatial_size, int channels) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= channels) return; float swap; int j; for (j = 0; j < spatial_size/2; ++j) { swap = weights[i * spatial_size + j]; weights[i * spatial_size + j] = weights[i * spatial_size + spatial_size - 1 - j]; weights[i * spatial_size + spatial_size - 1 - j] = swap; } } void deconv_transpose_weights(float* weights, int spatial_size, int channels) { hipLaunchKernelGGL(( deconv_transpose_weights_kernels), dim3(cuda_gridsize(channels)), dim3(BLOCK), 0, 0, weights, spatial_size, channels); check_error(hipPeekAtLastError()); } __global__ void stretch_fill_3d_kernel(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; int oc = c + (c - 1) * pad_c; if (i >= ow * oh * oc) return; int iy = i; int iw = i % ow; i = (i - iw) / ow; int ih = i % oh; i = (i - ih) / oh; int ic = i; if (iw % (pad_w + 1) || ih % (pad_h + 1) || ic % (pad_c + 1)) { y[iy] = 0; } else { iw = iw / (pad_w + 1); ih = ih / (pad_h + 1); ic = ic / (pad_c + 1); y[iy] = x[ic * w * h + ih * w + iw]; } } void stretch_fill_3d_gpu(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { //stretch x, save to y int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; int oc = c + (c - 1) * pad_c; hipLaunchKernelGGL(( stretch_fill_3d_kernel), dim3(cuda_gridsize(ow * oh * oc)), dim3(BLOCK), 0, 0, x, y, w, pad_w, h, pad_h, c, pad_c); check_error(hipPeekAtLastError()); } __global__ void squeeze_fill_3d_kernel(float* x, float* y, int w, int pad_w, int 
h, int pad_h, int c, int pad_c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= w * h * c) return; int iy = i; int iw = i % w; i = (i - iw) / w; int ih = i % h; i = (i - ih) / h; int ic = i; int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; iw = iw * (pad_w + 1); ih = ih * (pad_h + 1); ic = ic * (pad_c + 1); y[iy] += x[ic * ow * oh + ih * ow + iw]; } void squeeze_fill_3d_gpu(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { //squeeze x, save to y hipLaunchKernelGGL(( squeeze_fill_3d_kernel), dim3(cuda_gridsize(w * h * c)), dim3(BLOCK), 0, 0, x, y, w, pad_w, h, pad_h, c, pad_c); check_error(hipPeekAtLastError()); } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates); check_error(hipPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n); }else{ hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size); } check_error(hipPeekAtLastError()); } __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t); check_error(hipPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, 
int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void 
fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(hipPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float ALPHA, float BETA, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); if(forward) out[out_index] = ALPHA * x[in_index] + BETA * out[out_index]; else out[in_index] = ALPHA * x[out_index] + BETA * out[in_index]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { 
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void fill_int_kernel(int N, int ALPHA, int *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i 
< N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, 
int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float ALPHA, float BETA, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = ALPHA * x[i1] + BETA * out[i2]; else out[i1] = ALPHA * x[i2] + BETA * out[i1]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out, float ALPHA, float BETA) { int size = spatial*batch*layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, ALPHA, BETA, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out, float 
ALPHA, float BETA) { int size = w*h*c*batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, ALPHA, BETA, x, w, h, c, batch, stride, forward, out); check_error(hipPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val); check_error(hipPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale); check_error(hipPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, 
int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_int_gpu(int N, int ALPHA, int * X, int INCX) { hipLaunchKernelGGL(( fill_int_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(hipPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? 
-log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 
1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } //nghiant_20190822: //symmetric exp loss __global__ void symexp_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = expf(diff) + expf(-diff) - 2; delta[i] = expf(diff) - expf(-diff); } } extern "C" void symexp_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( symexp_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } //logcosh loss __global__ void logcosh_kernel(int n, float *pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = log(cosh(diff)); delta[i] = expf(diff) - expf(-diff); } } extern "C" void logcosh_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( logcosh_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } //nghiant_20190822_end __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 
1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c); check_error(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; 
if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc); check_error(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c); check_error(hipPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); int num = spatial*batch*hier.groups; hipLaunchKernelGGL(( 
softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(hipPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(hipPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, in, w, h, c, batch, stride, forward, scale, out); check_error(hipPeekAtLastError()); }
c377bdfe7a36ee3fa4f1574019a6d7e72fa16282.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <assert.h> extern "C" { #include "blas.h" #include "cuda.h" #include "utils.h" } __global__ void deconv_transpose_weights_kernels(float* weights, int spatial_size, int channels) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= channels) return; float swap; int j; for (j = 0; j < spatial_size/2; ++j) { swap = weights[i * spatial_size + j]; weights[i * spatial_size + j] = weights[i * spatial_size + spatial_size - 1 - j]; weights[i * spatial_size + spatial_size - 1 - j] = swap; } } void deconv_transpose_weights(float* weights, int spatial_size, int channels) { deconv_transpose_weights_kernels<<<cuda_gridsize(channels), BLOCK>>>(weights, spatial_size, channels); check_error(cudaPeekAtLastError()); } __global__ void stretch_fill_3d_kernel(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; int oc = c + (c - 1) * pad_c; if (i >= ow * oh * oc) return; int iy = i; int iw = i % ow; i = (i - iw) / ow; int ih = i % oh; i = (i - ih) / oh; int ic = i; if (iw % (pad_w + 1) || ih % (pad_h + 1) || ic % (pad_c + 1)) { y[iy] = 0; } else { iw = iw / (pad_w + 1); ih = ih / (pad_h + 1); ic = ic / (pad_c + 1); y[iy] = x[ic * w * h + ih * w + iw]; } } void stretch_fill_3d_gpu(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { //stretch x, save to y int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; int oc = c + (c - 1) * pad_c; stretch_fill_3d_kernel<<<cuda_gridsize(ow * oh * oc), BLOCK>>>(x, y, w, pad_w, h, pad_h, c, pad_c); check_error(cudaPeekAtLastError()); } __global__ void squeeze_fill_3d_kernel(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= w * h * c) return; int iy = i; int 
iw = i % w; i = (i - iw) / w; int ih = i % h; i = (i - ih) / h; int ic = i; int ow = w + (w - 1) * pad_w; int oh = h + (h - 1) * pad_h; iw = iw * (pad_w + 1); ih = ih * (pad_h + 1); ic = ic * (pad_c + 1); y[iy] += x[ic * ow * oh + ih * ow + iw]; } void squeeze_fill_3d_gpu(float* x, float* y, int w, int pad_w, int h, int pad_h, int c, int pad_c) { //squeeze x, save to y squeeze_fill_3d_kernel<<<cuda_gridsize(w * h * c), BLOCK>>>(x, y, w, pad_w, h, pad_h, c, pad_c); check_error(cudaPeekAtLastError()); } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n); }else{ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size); } check_error(cudaPeekAtLastError()); } __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t); check_error(cudaPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + 
threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, 
int filters, int spatial, float *variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(cudaPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float ALPHA, float BETA, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); if(forward) out[out_index] = ALPHA * x[in_index] + BETA * out[out_index]; else out[in_index] = ALPHA * x[out_index] + BETA * out[in_index]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } 
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void fill_int_kernel(int N, int ALPHA, int *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int 
filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu_offset(int 
N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float ALPHA, float BETA, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = ALPHA * x[i1] + BETA * out[i2]; else out[i1] = ALPHA * x[i2] + BETA * out[i1]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out, float ALPHA, float BETA) { int size = spatial*batch*layers; flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, ALPHA, BETA, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out, float ALPHA, float BETA) { int size = w*h*c*batch; reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, ALPHA, BETA, x, w, h, c, batch, stride, forward, out); check_error(cudaPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val); check_error(cudaPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float 
scale) { scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale); check_error(cudaPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_int_gpu(int N, int ALPHA, int * X, int INCX) { fill_int_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float 
s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? 
-log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 
1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } //nghiant_20190822: //symmetric exp loss __global__ void symexp_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = expf(diff) + expf(-diff) - 2; delta[i] = expf(diff) - expf(-diff); } } extern "C" void symexp_gpu(int n, float *pred, float *truth, float *delta, float *error) { symexp_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } //logcosh loss __global__ void logcosh_kernel(int n, float *pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = log(cosh(diff)); delta[i] = expf(diff) - expf(-diff); } } extern "C" void logcosh_gpu(int n, float *pred, float *truth, float *delta, float *error) { logcosh_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } //nghiant_20190822_end __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? 
b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c); check_error(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc); check_error(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, 
float *c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c); check_error(cudaPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); int num = spatial*batch*hier.groups; softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(cudaPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + 
b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(cudaPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out); check_error(cudaPeekAtLastError()); }
a13278066fa81a7f11de145277ea49eaa8fd1bd6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "test.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Node *d_graph_nodes = NULL; hipMalloc(&d_graph_nodes, XSIZE*YSIZE); int no_of_nodes = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( test), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,no_of_nodes); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( test), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,no_of_nodes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( test), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,no_of_nodes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a13278066fa81a7f11de145277ea49eaa8fd1bd6.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "test.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Node *d_graph_nodes = NULL; cudaMalloc(&d_graph_nodes, XSIZE*YSIZE); int no_of_nodes = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); test<<<gridBlock,threadBlock>>>(d_graph_nodes,no_of_nodes); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { test<<<gridBlock,threadBlock>>>(d_graph_nodes,no_of_nodes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { test<<<gridBlock,threadBlock>>>(d_graph_nodes,no_of_nodes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c3d8a100994e7b4e04356abdc08461e9d5b3a32e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> // sets of fg/bg pixels in python not sorted, so slightly different result // here, total sum over array should be identical __device__ void _fillConsensusArray3( unsigned idx, unsigned idy, unsigned idz, const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); // ignore border pixels if ((idx < (DATAXSIZE-PSXH)) && (idy < (DATAYSIZE-PSYH)) && (idz < (DATAZSIZE-PSZH)) && (idx >= (PSXH)) && (idy >= (PSYH)) && (idz >= (PSZH))){ // only if pixel in foreground if(inPred[mid][idz][idy][idx] <= TH) return; // for all pairs of pixels in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { // offset in patch pixel 1 int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // first element of pair should have high affinity // (to not count every pair twice) float v1 = inPred[po1][idz][idy][idx]; if(v1 <= TH) { continue; } // check if predicted affinity in patch agrees // with corresponding pixel in fg prediction const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } if(inOverlap[z1][y1][x1] != 0){ continue; } // second element of pixel pair for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { // offset in patch pixel 2 int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if (po1 == po2) continue; const int z2 = idz+pz2-PSZH; const int y2 = idy+py2-PSYH; const int x2 = idx+px2-PSXH; // patch pixel should correspond to foreground if(inPred[mid][z2][y2][x2] <= TH) { continue; } if(inOverlap[z2][y2][x2] != 0){ continue; } float v2 = 
inPred[po2][idz][idy][idx]; // offset from pixel 1 to pixel 2 int zo = pz2-pz1+PSZ-1; int yo = py2-py1+PSY-1; int xo = px2-px1+PSX-1; // if both high affinity, increase consensus // pixel 1 with offset yo/xo to pixel 2 if(v2 > TH) { if(po2 <= po1) continue; // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // 1); // float v3 = v1*v2; float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH); atomicAdd( &outCons[zo][yo][xo][z1][y1][x1], v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z1][y1][x1], // 1); } // if one foreground/one background, // decrease consensus else if(v2 < THI) { // reverse order if pixel 2 before pixel1 if(po2 <= po1) { zo = pz1-pz2; zo += PSZ-1; yo = py1-py2; yo += PSY-1; xo = px1-px2; xo += PSX-1; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -1); // float v3 = v1*(1-v2); float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // v3 = v3*4/3; atomicAdd( &outCons[zo][yo][xo][z2][y2][x2], -v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z2][y2][x2], // 1); } else { // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -1); // v3 = v3*4/3; // float v3 = v1*(1-v2); float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); atomicAdd( &outCons[zo][yo][xo][z1][y1][x1], -v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z1][y1][x1], // 1); } } } } } } } } } } // device function to set the 3D volume __global__ void fillConsensusArray_allPatches3( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { // pixel for this thread: idz, idy, idx unsigned idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned idy = blockIdx.y*blockDim.y + threadIdx.y; unsigned idz = blockIdx.z*blockDim.z + threadIdx.z; //unsigned idz = 0; _fillConsensusArray3(idx, idy, idz, inPred, inOverlap, outCons); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } // device function to set the 3D volume __global__ void fillConsensusArray_subsetPatches3( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], 
const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE], const unsigned patchesIDs[], const uint64_t numPatches) { unsigned id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= numPatches) return; int idz = patchesIDs[id*3+0]; int idy = patchesIDs[id*3+1]; int idx = patchesIDs[id*3+2]; _fillConsensusArray3(idx, idy, idz, inPred, inOverlap, outCons); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } #ifdef MAIN_FILLCONSENSUS #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *outConsensusGPU = allocInitConsensus(); computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU); return 0; } #endif
c3d8a100994e7b4e04356abdc08461e9d5b3a32e.cu
#include <cstdint> // sets of fg/bg pixels in python not sorted, so slightly different result // here, total sum over array should be identical __device__ void _fillConsensusArray3( unsigned idx, unsigned idy, unsigned idz, const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); // ignore border pixels if ((idx < (DATAXSIZE-PSXH)) && (idy < (DATAYSIZE-PSYH)) && (idz < (DATAZSIZE-PSZH)) && (idx >= (PSXH)) && (idy >= (PSYH)) && (idz >= (PSZH))){ // only if pixel in foreground if(inPred[mid][idz][idy][idx] <= TH) return; // for all pairs of pixels in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { // offset in patch pixel 1 int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // first element of pair should have high affinity // (to not count every pair twice) float v1 = inPred[po1][idz][idy][idx]; if(v1 <= TH) { continue; } // check if predicted affinity in patch agrees // with corresponding pixel in fg prediction const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } if(inOverlap[z1][y1][x1] != 0){ continue; } // second element of pixel pair for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { // offset in patch pixel 2 int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if (po1 == po2) continue; const int z2 = idz+pz2-PSZH; const int y2 = idy+py2-PSYH; const int x2 = idx+px2-PSXH; // patch pixel should correspond to foreground if(inPred[mid][z2][y2][x2] <= TH) { continue; } if(inOverlap[z2][y2][x2] != 0){ continue; } float v2 = inPred[po2][idz][idy][idx]; // offset from pixel 1 to pixel 2 int zo = pz2-pz1+PSZ-1; int yo = 
py2-py1+PSY-1; int xo = px2-px1+PSX-1; // if both high affinity, increase consensus // pixel 1 with offset yo/xo to pixel 2 if(v2 > TH) { if(po2 <= po1) continue; // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // 1); // float v3 = v1*v2; float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH); atomicAdd( &outCons[zo][yo][xo][z1][y1][x1], v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z1][y1][x1], // 1); } // if one foreground/one background, // decrease consensus else if(v2 < THI) { // reverse order if pixel 2 before pixel1 if(po2 <= po1) { zo = pz1-pz2; zo += PSZ-1; yo = py1-py2; yo += PSY-1; xo = px1-px2; xo += PSX-1; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -1); // float v3 = v1*(1-v2); float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // v3 = v3*4/3; atomicAdd( &outCons[zo][yo][xo][z2][y2][x2], -v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z2][y2][x2], // 1); } else { // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -1); // v3 = v3*4/3; // float v3 = v1*(1-v2); float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); atomicAdd( &outCons[zo][yo][xo][z1][y1][x1], -v3); // atomicAdd( // &outConsCnt[zo][yo][xo][z1][y1][x1], // 1); } } } } } } } } } } // device function to set the 3D volume __global__ void fillConsensusArray_allPatches3( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { // pixel for this thread: idz, idy, idx unsigned idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned idy = blockIdx.y*blockDim.y + threadIdx.y; unsigned idz = blockIdx.z*blockDim.z + threadIdx.z; //unsigned idz = 0; _fillConsensusArray3(idx, idy, idz, inPred, inOverlap, outCons); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } // device function to set the 3D volume __global__ void fillConsensusArray_subsetPatches3( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float 
outCons[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE], const unsigned patchesIDs[], const uint64_t numPatches) { unsigned id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= numPatches) return; int idz = patchesIDs[id*3+0]; int idy = patchesIDs[id*3+1]; int idx = patchesIDs[id*3+2]; _fillConsensusArray3(idx, idy, idz, inPred, inOverlap, outCons); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } #ifdef MAIN_FILLCONSENSUS #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *outConsensusGPU = allocInitConsensus(); computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU); return 0; } #endif
bd10e929deef9f08a5e95d031d9b942089702dd2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define MAX_ERR 1e-6 // Random array generator (generates float between 0 - 256 for each entry in the array) float* RandArray(const int size) { srand((unsigned)time(NULL)); static float* r; r = NULL; r = (float*)malloc(size * sizeof(float)); for(int i=0; i<size; i++) // r[i] = (float)rand()/(float)(RAND_MAX/256); r[i] = 1; return r; } __global__ void conv_layer(float *output_fm_g, float *input_fm_g, float *filter_kernel_g, int N, int M, int F, int E, int R, int S, int C, int H, int W, int U) { int start_idx_x= blockIdx.x*(blockDim.x)+ threadIdx.x; //Output X and Output Y and Output Z for 1 batch int start_idx_y= blockIdx.y*(blockDim.y)+ threadIdx.y; int start_idx_z= blockIdx.z; //N is for Batch //M is for Output Filter Map channel (Z dimension) //F is for Output Y dimension //E is for Output X dimension //R and S are for filter //C is for input striding int m=start_idx_z; int f=start_idx_y; int e=start_idx_x; float temp_output; for(int n=0;n<N;n++) { if((m<M)&&(f<F)&&(e<E)) { for(int i=0;i<R;i++) { for(int j=0;j<S;j++) { for(int k=0;k<C;k++) { temp_output += input_fm_g[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel_g[C*R*S*m + R*S*k + S*i + j]; } } } output_fm_g[M*F*E*n + F*E*m + E*f + e]=temp_output; } } } int main( int argc, char *argv[] ) { int N ; // input batch size int M ; // num of filters int C ; // num of channels int H ; // input height int W ; // input height and weight int R ; // kernel height int S ; // kernel weight int E ; // output FMAP height int F ; // output FMAP weight int U ; // convolution stride float *input_fm; float *filter_kernel; float *output_fm; float *input_fm_g; float *filter_kernel_g; float *output_fm_g; //CHANGE BATCH SIZE int layer_num; if(argc == 1) { printf("Error No Parameters passed"); return 0; } 
N=atoi(argv[1]); printf("N(Number of Batches) = %d\n ",N); layer_num=atoi(argv[2]); printf("Layer= %d\n",layer_num); if (layer_num==1) { //FIRST LAYER M=96,C=3,H=227,W=227,R=11,S=11,E=55,F=55,U=4; //printf("First Layer\n"); } else if (layer_num==2) { //SECOND LAYER M=256,C=96,H=31,W=31,R=5,S=5,E=27,F=27,U=1; //printf("Second Layer\n"); } else if (layer_num==3) { //THIRD LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 254, U = 1; //printf("Third Layer\n"); } else if (layer_num==4) { //FOURTH LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fourth Layer\n"); } else if (layer_num==5) { //FIFTH LAYER M = 256, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fifth Layer\n"); } else { printf("Invalid Layer Number Input\n"); return 0; } //Nth LAYER //Allocating CPU memory input_fm = (float*)malloc(sizeof(float)*(N*C*H*W)); filter_kernel = (float*)malloc(sizeof(float)*(M*C*R*S)); output_fm = (float*)malloc(sizeof(float)*(N*M*E*F)); //Allocating GPU memory hipMalloc((void**)&input_fm_g, sizeof(float) * N*C*H*W); hipMalloc((void**)&filter_kernel_g, sizeof(float) * M*C*R*S); hipMalloc((void**)&output_fm_g, sizeof(float) * N*M*E*F); //Assigning Inputs and Outputs input_fm=RandArray(N*W*C*H), filter_kernel=RandArray(M*C*R*S); dim3 block_2d_dimension(16,16,1); int ceil1_E = ceil((double)E/16.0); int ceil1_F = ceil((double)F/16.0); int ceil1_M = ceil((double)M); dim3 grid_3d_dimension(ceil1_E,ceil1_F,ceil1_M); //printf("Dimensions are %d %d %d \n",ceil1_E,ceil1_F,ceil1_M); // Mem copy hipMemcpy(input_fm_g, input_fm, sizeof(float) *N*C*H*W, hipMemcpyHostToDevice); hipMemcpy(filter_kernel_g, filter_kernel, sizeof(float) *M*C*R*S, hipMemcpyHostToDevice); // Launch kernel hipLaunchKernelGGL(( conv_layer), dim3(grid_3d_dimension), dim3(block_2d_dimension) , 0, 0, output_fm_g, input_fm_g, filter_kernel_g, N, M, F, E, R, S, C, H, W, U); hipMemcpy(output_fm, output_fm_g, sizeof(float) *N*M*E*F, 
hipMemcpyDeviceToHost); printf("%f \n",output_fm[0]); //Done with Kernel hipFree(input_fm_g); hipFree(output_fm_g),hipFree(filter_kernel_g); // END OF Nth LAYER //VALIDATION CODE // float *output_fm_v; // output_fm_v = (float*)malloc(sizeof(float)*(N*M*E*F)); // //VALIDATION CODE // for(int n=0;n<N;n++) // { // for(int m=0;m<M;m++) { // for(int f=0;f<F;f++) { // for(int e=0;e<E;e++) { // //output_fm[N*n + M*m + F*f + E*e]=0; // output_fm_v[M*F*E*n + F*E*m + E*f + e]=0; // for(int i=0;i<R;i++) { // for(int j=0;j<S;j++) { // for(int k=0;k<C;k++) { // output_fm_v[M*F*E*n + F*E*m + E*f + e] += input_fm[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel[C*R*S*m + R*S*k + S*i + j]; // // printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // //printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // } // // Verification // for(int i = 0; i < N*M*F*E ; i++) // { // assert(fabs(output_fm_v[i] - output_fm[i] ) < MAX_ERR); // } // printf("output_fm_v[0] = %f\n", output_fm_v[0]); // printf("PASSED\n"); // //int op_index; // // // //Saving To File // int op_index; // FILE *file1 = fopen("Output_Toeplitz.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file1,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file1,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f + e; // // int output=(int)output_fm[op_index]; // fprintf(file1,"%f ",output_fm[op_index]); // } // fprintf(file1,"\n"); // } // } // } // FILE *file2 = fopen("Output_Toeplitz_v.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file2,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file2,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f+e; // //int output=(int)output_fm_v[op_index]; // fprintf(file2,"%f ",output_fm_v[op_index]); // } // fprintf(file2,"\n"); // } // } // } return 0; }
bd10e929deef9f08a5e95d031d9b942089702dd2.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda.h> #define MAX_ERR 1e-6 // Random array generator (generates float between 0 - 256 for each entry in the array) float* RandArray(const int size) { srand((unsigned)time(NULL)); static float* r; r = NULL; r = (float*)malloc(size * sizeof(float)); for(int i=0; i<size; i++) // r[i] = (float)rand()/(float)(RAND_MAX/256); r[i] = 1; return r; } __global__ void conv_layer(float *output_fm_g, float *input_fm_g, float *filter_kernel_g, int N, int M, int F, int E, int R, int S, int C, int H, int W, int U) { int start_idx_x= blockIdx.x*(blockDim.x)+ threadIdx.x; //Output X and Output Y and Output Z for 1 batch int start_idx_y= blockIdx.y*(blockDim.y)+ threadIdx.y; int start_idx_z= blockIdx.z; //N is for Batch //M is for Output Filter Map channel (Z dimension) //F is for Output Y dimension //E is for Output X dimension //R and S are for filter //C is for input striding int m=start_idx_z; int f=start_idx_y; int e=start_idx_x; float temp_output; for(int n=0;n<N;n++) { if((m<M)&&(f<F)&&(e<E)) { for(int i=0;i<R;i++) { for(int j=0;j<S;j++) { for(int k=0;k<C;k++) { temp_output += input_fm_g[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel_g[C*R*S*m + R*S*k + S*i + j]; } } } output_fm_g[M*F*E*n + F*E*m + E*f + e]=temp_output; } } } int main( int argc, char *argv[] ) { int N ; // input batch size int M ; // num of filters int C ; // num of channels int H ; // input height int W ; // input height and weight int R ; // kernel height int S ; // kernel weight int E ; // output FMAP height int F ; // output FMAP weight int U ; // convolution stride float *input_fm; float *filter_kernel; float *output_fm; float *input_fm_g; float *filter_kernel_g; float *output_fm_g; //CHANGE BATCH SIZE int layer_num; if(argc == 1) { printf("Error No Parameters passed"); return 0; } N=atoi(argv[1]); printf("N(Number of Batches) = %d\n ",N); layer_num=atoi(argv[2]); 
printf("Layer= %d\n",layer_num); if (layer_num==1) { //FIRST LAYER M=96,C=3,H=227,W=227,R=11,S=11,E=55,F=55,U=4; //printf("First Layer\n"); } else if (layer_num==2) { //SECOND LAYER M=256,C=96,H=31,W=31,R=5,S=5,E=27,F=27,U=1; //printf("Second Layer\n"); } else if (layer_num==3) { //THIRD LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 254, U = 1; //printf("Third Layer\n"); } else if (layer_num==4) { //FOURTH LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fourth Layer\n"); } else if (layer_num==5) { //FIFTH LAYER M = 256, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fifth Layer\n"); } else { printf("Invalid Layer Number Input\n"); return 0; } //Nth LAYER //Allocating CPU memory input_fm = (float*)malloc(sizeof(float)*(N*C*H*W)); filter_kernel = (float*)malloc(sizeof(float)*(M*C*R*S)); output_fm = (float*)malloc(sizeof(float)*(N*M*E*F)); //Allocating GPU memory cudaMalloc((void**)&input_fm_g, sizeof(float) * N*C*H*W); cudaMalloc((void**)&filter_kernel_g, sizeof(float) * M*C*R*S); cudaMalloc((void**)&output_fm_g, sizeof(float) * N*M*E*F); //Assigning Inputs and Outputs input_fm=RandArray(N*W*C*H), filter_kernel=RandArray(M*C*R*S); dim3 block_2d_dimension(16,16,1); int ceil1_E = ceil((double)E/16.0); int ceil1_F = ceil((double)F/16.0); int ceil1_M = ceil((double)M); dim3 grid_3d_dimension(ceil1_E,ceil1_F,ceil1_M); //printf("Dimensions are %d %d %d \n",ceil1_E,ceil1_F,ceil1_M); // Mem copy cudaMemcpy(input_fm_g, input_fm, sizeof(float) *N*C*H*W, cudaMemcpyHostToDevice); cudaMemcpy(filter_kernel_g, filter_kernel, sizeof(float) *M*C*R*S, cudaMemcpyHostToDevice); // Launch kernel conv_layer<<< grid_3d_dimension, block_2d_dimension >>>( output_fm_g, input_fm_g, filter_kernel_g, N, M, F, E, R, S, C, H, W, U); cudaMemcpy(output_fm, output_fm_g, sizeof(float) *N*M*E*F, cudaMemcpyDeviceToHost); printf("%f \n",output_fm[0]); //Done with Kernel cudaFree(input_fm_g); 
cudaFree(output_fm_g),cudaFree(filter_kernel_g); // END OF Nth LAYER //VALIDATION CODE // float *output_fm_v; // output_fm_v = (float*)malloc(sizeof(float)*(N*M*E*F)); // //VALIDATION CODE // for(int n=0;n<N;n++) // { // for(int m=0;m<M;m++) { // for(int f=0;f<F;f++) { // for(int e=0;e<E;e++) { // //output_fm[N*n + M*m + F*f + E*e]=0; // output_fm_v[M*F*E*n + F*E*m + E*f + e]=0; // for(int i=0;i<R;i++) { // for(int j=0;j<S;j++) { // for(int k=0;k<C;k++) { // output_fm_v[M*F*E*n + F*E*m + E*f + e] += input_fm[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel[C*R*S*m + R*S*k + S*i + j]; // // printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // //printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // } // // Verification // for(int i = 0; i < N*M*F*E ; i++) // { // assert(fabs(output_fm_v[i] - output_fm[i] ) < MAX_ERR); // } // printf("output_fm_v[0] = %f\n", output_fm_v[0]); // printf("PASSED\n"); // //int op_index; // // // //Saving To File // int op_index; // FILE *file1 = fopen("Output_Toeplitz.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file1,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file1,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f + e; // // int output=(int)output_fm[op_index]; // fprintf(file1,"%f ",output_fm[op_index]); // } // fprintf(file1,"\n"); // } // } // } // FILE *file2 = fopen("Output_Toeplitz_v.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file2,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file2,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f+e; // //int output=(int)output_fm_v[op_index]; // fprintf(file2,"%f ",output_fm_v[op_index]); // } // fprintf(file2,"\n"); // } // } // } return 0; }
16e7167717808b4a486ce559c7c67c143140b969.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <errno.h> #include <limits.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> typedef unsigned uint; typedef float flt; #define exit_if(cnd_value, msg) \ do { \ if (cnd_value) { \ if (errno) \ perror(msg); \ else \ fprintf(stderr, "error: %s\n", msg); \ exit(EXIT_FAILURE); \ } \ } while (0) #define cudaErrorCheck(error) \ do { \ hipError_t res = error; \ if (res != hipSuccess) { \ fprintf(stderr, "cuda %s:%d error: %s\n", __FILE__, __LINE__, \ hipGetErrorString(res)); \ exit(EXIT_FAILURE); \ } \ } while(0) #define MU_COUNT (3U) __device__ __constant__ int3 deviceMu[MU_COUNT]; __global__ static void classify(uchar4 * __restrict__, uint, uint); __host__ static uchar4 *read_file(const char *, uint *, uint *); __host__ static void write_file( const char *, const uchar4 * __restrict__, uint, uint ); int main(void) { char fileIn[FILENAME_MAX], fileOut[FILENAME_MAX]; scanf("%s%s", fileIn, fileOut); uint w, h; uchar4 * const __restrict__ data = read_file(fileIn, &w, &h); const uint size = sizeof(uchar4) * w * h; uchar4 *deviceData; cudaErrorCheck(hipMalloc(&deviceData, size)); cudaErrorCheck(hipMemcpy(deviceData, data, size, hipMemcpyHostToDevice)); int3 hostMu[MU_COUNT] = { make_int3(255, 0, 0), make_int3(0, 255, 0), make_int3(0, 0, 255) }; cudaErrorCheck(hipMemcpyToSymbol(deviceMu, hostMu, sizeof(int3) * MU_COUNT )); hipLaunchKernelGGL(( classify), dim3(dim3(16U, 16U)), dim3(dim3(16U, 16U)), 0, 0, deviceData, w, h); cudaErrorCheck(hipGetLastError()); cudaErrorCheck(hipMemcpy(data, deviceData, size, hipMemcpyDeviceToHost)); cudaErrorCheck(hipFree(deviceData)); write_file(fileOut, data, w, h); free(data); return 0; } __global__ static void classify( uchar4 * const __restrict__ data, const uint w, const uint h ) { const uint idxX = threadIdx.x + blockDim.x * blockIdx.x, idxY = threadIdx.y + blockDim.y * blockIdx.y, offsetX = blockDim.x * gridDim.x, 
offsetY = blockDim.y * gridDim.y; for (uint j = idxY; j < h; j += offsetY) { for (uint i = idxX; i < w; i += offsetX) { int min_distance = INT_MAX; const uint index = j * w + i; const uchar4 data_i_j = data[index]; for (uint idx = 0; idx < MU_COUNT; ++idx) { const int distance = (data_i_j.x - deviceMu[idx].x) * (data_i_j.x - deviceMu[idx].x) + (data_i_j.y - deviceMu[idx].y) * (data_i_j.y - deviceMu[idx].y) + (data_i_j.z - deviceMu[idx].z) * (data_i_j.z - deviceMu[idx].z); if (distance < min_distance) { data[index].w = idx; min_distance = distance; } } } } } __host__ static uchar4 *read_file( const char * const file, uint * const w, uint * const h ) { FILE * const stream = fopen(file, "rb"); exit_if(stream == NULL, "fopen()"); exit_if(fread(w, sizeof(uint), 1, stream) != 1, "fread()"); exit_if(fread(h, sizeof(uint), 1, stream) != 1, "fread()"); const uint count = (*w) * (*h); uchar4 * const data = (uchar4 *) malloc(sizeof(uchar4) * count); exit_if(data == NULL, "malloc()"); exit_if(fread(data, sizeof(uchar4), count, stream) != count, "fread()"); exit_if(fclose(stream) != 0, "close()"); return data; } __host__ static void write_file( const char * const file, const uchar4 * const __restrict__ data, const uint w, const uint h ) { FILE * const stream = fopen(file, "wb"); exit_if(stream == NULL, "fopen()"); const uint count = w * h; exit_if(fwrite(&w, sizeof(uint), 1, stream) != 1, "fwrite()"); exit_if(fwrite(&h, sizeof(uint), 1, stream) != 1, "fwrite()"); exit_if(fwrite(data, sizeof(uchar4), count, stream) != count, "fwrite()"); exit_if(fclose(stream) != 0, "fclose()"); }
16e7167717808b4a486ce559c7c67c143140b969.cu
#include <errno.h> #include <limits.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> typedef unsigned uint; typedef float flt; #define exit_if(cnd_value, msg) \ do { \ if (cnd_value) { \ if (errno) \ perror(msg); \ else \ fprintf(stderr, "error: %s\n", msg); \ exit(EXIT_FAILURE); \ } \ } while (0) #define cudaErrorCheck(error) \ do { \ cudaError_t res = error; \ if (res != cudaSuccess) { \ fprintf(stderr, "cuda %s:%d error: %s\n", __FILE__, __LINE__, \ cudaGetErrorString(res)); \ exit(EXIT_FAILURE); \ } \ } while(0) #define MU_COUNT (3U) __device__ __constant__ int3 deviceMu[MU_COUNT]; __global__ static void classify(uchar4 * __restrict__, uint, uint); __host__ static uchar4 *read_file(const char *, uint *, uint *); __host__ static void write_file( const char *, const uchar4 * __restrict__, uint, uint ); int main(void) { char fileIn[FILENAME_MAX], fileOut[FILENAME_MAX]; scanf("%s%s", fileIn, fileOut); uint w, h; uchar4 * const __restrict__ data = read_file(fileIn, &w, &h); const uint size = sizeof(uchar4) * w * h; uchar4 *deviceData; cudaErrorCheck(cudaMalloc(&deviceData, size)); cudaErrorCheck(cudaMemcpy(deviceData, data, size, cudaMemcpyHostToDevice)); int3 hostMu[MU_COUNT] = { make_int3(255, 0, 0), make_int3(0, 255, 0), make_int3(0, 0, 255) }; cudaErrorCheck(cudaMemcpyToSymbol(deviceMu, hostMu, sizeof(int3) * MU_COUNT )); classify<<<dim3(16U, 16U), dim3(16U, 16U)>>>(deviceData, w, h); cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaMemcpy(data, deviceData, size, cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaFree(deviceData)); write_file(fileOut, data, w, h); free(data); return 0; } __global__ static void classify( uchar4 * const __restrict__ data, const uint w, const uint h ) { const uint idxX = threadIdx.x + blockDim.x * blockIdx.x, idxY = threadIdx.y + blockDim.y * blockIdx.y, offsetX = blockDim.x * gridDim.x, offsetY = blockDim.y * gridDim.y; for (uint j = idxY; j < h; j += offsetY) { for (uint i = idxX; i < w; i += 
offsetX) { int min_distance = INT_MAX; const uint index = j * w + i; const uchar4 data_i_j = data[index]; for (uint idx = 0; idx < MU_COUNT; ++idx) { const int distance = (data_i_j.x - deviceMu[idx].x) * (data_i_j.x - deviceMu[idx].x) + (data_i_j.y - deviceMu[idx].y) * (data_i_j.y - deviceMu[idx].y) + (data_i_j.z - deviceMu[idx].z) * (data_i_j.z - deviceMu[idx].z); if (distance < min_distance) { data[index].w = idx; min_distance = distance; } } } } } __host__ static uchar4 *read_file( const char * const file, uint * const w, uint * const h ) { FILE * const stream = fopen(file, "rb"); exit_if(stream == NULL, "fopen()"); exit_if(fread(w, sizeof(uint), 1, stream) != 1, "fread()"); exit_if(fread(h, sizeof(uint), 1, stream) != 1, "fread()"); const uint count = (*w) * (*h); uchar4 * const data = (uchar4 *) malloc(sizeof(uchar4) * count); exit_if(data == NULL, "malloc()"); exit_if(fread(data, sizeof(uchar4), count, stream) != count, "fread()"); exit_if(fclose(stream) != 0, "close()"); return data; } __host__ static void write_file( const char * const file, const uchar4 * const __restrict__ data, const uint w, const uint h ) { FILE * const stream = fopen(file, "wb"); exit_if(stream == NULL, "fopen()"); const uint count = w * h; exit_if(fwrite(&w, sizeof(uint), 1, stream) != 1, "fwrite()"); exit_if(fwrite(&h, sizeof(uint), 1, stream) != 1, "fwrite()"); exit_if(fwrite(data, sizeof(uchar4), count, stream) != count, "fwrite()"); exit_if(fclose(stream) != 0, "fclose()"); }
bffe43dfbaf79f39486c023b8b120bdf24f6b813.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gtest/gtest.h> #include <poly/kernel.hpp> #include <poly/vector.hpp> using namespace koishi; #ifdef KOISHI_USE_CUDA __global__ void f( unsigned a ) { } __global__ void g( poly::vector<int> &a, unsigned b ) { } __global__ void h( unsigned a, poly::vector<int> &b ) { } #endif TEST( test_kernel, ) { #ifdef KOISHI_USE_CUDA poly::vector<int> a; poly::kernel( h, 1, 1 )( 0u, a ); poly::kernel( g, 1, 1 )( a, 0u ); poly::kernel( f, 1, 1 )( 0u ); #endif }
bffe43dfbaf79f39486c023b8b120bdf24f6b813.cu
#include <gtest/gtest.h> #include <poly/kernel.hpp> #include <poly/vector.hpp> using namespace koishi; #ifdef KOISHI_USE_CUDA __global__ void f( unsigned a ) { } __global__ void g( poly::vector<int> &a, unsigned b ) { } __global__ void h( unsigned a, poly::vector<int> &b ) { } #endif TEST( test_kernel, ) { #ifdef KOISHI_USE_CUDA poly::vector<int> a; poly::kernel( h, 1, 1 )( 0u, a ); poly::kernel( g, 1, 1 )( a, 0u ); poly::kernel( f, 1, 1 )( 0u ); #endif }
981c6fd05e7caf1269ea3789224bb65edf5b0e4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../../gpu_utils/runtime.h" #include "GArray.h" __global__ void update_array_neuron(GArrayNeurons *d_neurons, int num, int start_id) { __shared__ int fire_table_t[MAXBLOCKSIZE]; __shared__ volatile unsigned int fire_cnt; if (threadIdx.x == 0) { fire_cnt = 0; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int idx = tid; idx < num; idx += blockDim.x * gridDim.x) { bool fired = false; int test_loc = 0; fired = (d_neurons->p_start[idx] < d_neurons->p_end[idx]) && (gCurrentCycle >= d_neurons->p_fire_time[d_neurons->p_start[idx]]); gFireCount[start_id + idx] += fired; for (int i=0; i<2; i++) { if (fired) { test_loc = atomicAdd((int*)&fire_cnt, 1); if (test_loc < MAXBLOCKSIZE) { fire_table_t[test_loc] = start_id + idx; d_neurons->p_start[idx] = d_neurons->p_start[idx] + 1; fired = false; } } __syncthreads(); if (fire_cnt >= MAXBLOCKSIZE) { commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &(gFiredTableSizes[gCurrentIdx]), gFiredTableCap*gCurrentIdx); //advance_array_neuron(d_neurons, fire_table_t, MAXBLOCKSIZE, start_id); if (threadIdx.x == 0) { fire_cnt = 0; } } __syncthreads(); } } __syncthreads(); if (fire_cnt > 0) { commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &(gFiredTableSizes[gCurrentIdx]), gFiredTableCap*gCurrentIdx); //advance_array_neuron(d_neurons, fire_table_t, fire_cnt, start_id); if (threadIdx.x == 0) { fire_cnt = 0; } } }
981c6fd05e7caf1269ea3789224bb65edf5b0e4a.cu
#include "../../gpu_utils/runtime.h" #include "GArray.h" __global__ void update_array_neuron(GArrayNeurons *d_neurons, int num, int start_id) { __shared__ int fire_table_t[MAXBLOCKSIZE]; __shared__ volatile unsigned int fire_cnt; if (threadIdx.x == 0) { fire_cnt = 0; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int idx = tid; idx < num; idx += blockDim.x * gridDim.x) { bool fired = false; int test_loc = 0; fired = (d_neurons->p_start[idx] < d_neurons->p_end[idx]) && (gCurrentCycle >= d_neurons->p_fire_time[d_neurons->p_start[idx]]); gFireCount[start_id + idx] += fired; for (int i=0; i<2; i++) { if (fired) { test_loc = atomicAdd((int*)&fire_cnt, 1); if (test_loc < MAXBLOCKSIZE) { fire_table_t[test_loc] = start_id + idx; d_neurons->p_start[idx] = d_neurons->p_start[idx] + 1; fired = false; } } __syncthreads(); if (fire_cnt >= MAXBLOCKSIZE) { commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &(gFiredTableSizes[gCurrentIdx]), gFiredTableCap*gCurrentIdx); //advance_array_neuron(d_neurons, fire_table_t, MAXBLOCKSIZE, start_id); if (threadIdx.x == 0) { fire_cnt = 0; } } __syncthreads(); } } __syncthreads(); if (fire_cnt > 0) { commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &(gFiredTableSizes[gCurrentIdx]), gFiredTableCap*gCurrentIdx); //advance_array_neuron(d_neurons, fire_table_t, fire_cnt, start_id); if (threadIdx.x == 0) { fire_cnt = 0; } } }
cdc43d81f1c79b0f5944ec92faac5731cf5f8890.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "cuda_test_class.h" __global__ void mykernel(int *a, int * b, int * c){ *c=*a+*b; } int main(void){ int a,b,c; int *d_a, *d_b, *d_c; int size = sizeof(int); //Allocate space for device hipMalloc((void **) & d_a, size); hipMalloc((void **) & d_b, size); hipMalloc((void **) & d_c, size); a=2; b=7; //Copy inputs to device hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c); //Copy results back to host hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost); //cleanup hipFree(d_a); hipFree(d_b); hipFree(d_c); std::cout<<"CUDA answer = "<<c<<std::endl; std::cout<<"Should be = "<<a+b<<std::endl; int N=10; cuda_test_class c_test(N); std::cout<<"Created class"<<std::endl; std::cout<<"Now check"<<std::endl; std::cout<<"Passed? "<<c_test.check()<<std::endl; std::cout<<"GPU adding"<<std::endl; c_test.add(); std::cout<<"Now check gpu"<<std::endl; std::cout<<"Passed? "<<c_test.check()<<std::endl; std::cout<<"Hello World!"<<std::endl; return 0; }
cdc43d81f1c79b0f5944ec92faac5731cf5f8890.cu
#include <iostream> #include "cuda_test_class.h" __global__ void mykernel(int *a, int * b, int * c){ *c=*a+*b; } int main(void){ int a,b,c; int *d_a, *d_b, *d_c; int size = sizeof(int); //Allocate space for device cudaMalloc((void **) & d_a, size); cudaMalloc((void **) & d_b, size); cudaMalloc((void **) & d_c, size); a=2; b=7; //Copy inputs to device cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice); mykernel<<<1,1>>>(d_a, d_b, d_c); //Copy results back to host cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost); //cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); std::cout<<"CUDA answer = "<<c<<std::endl; std::cout<<"Should be = "<<a+b<<std::endl; int N=10; cuda_test_class c_test(N); std::cout<<"Created class"<<std::endl; std::cout<<"Now check"<<std::endl; std::cout<<"Passed? "<<c_test.check()<<std::endl; std::cout<<"GPU adding"<<std::endl; c_test.add(); std::cout<<"Now check gpu"<<std::endl; std::cout<<"Passed? "<<c_test.check()<<std::endl; std::cout<<"Hello World!"<<std::endl; return 0; }
82524fcd4b1c5365e0501eae833b2d904753988f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once /* ## Clase `gpu_array` Un *smart pointer* para arreglos de objetos en el GPU. La contraparte del `gpu_object` pero para arreglos. La clase abstrae la alocacin y liberacin de memoria adems de las operaciones de copia entre Host y Device. Esta es una abstraccin del Host, por lo que no se puede utilizar en un kernel. */ #include "pcuditas/gpu/macros.cu" #include "pcuditas/gpu/kernels.cu" #include "pcuditas/gpu/gpu_object.cu" #include <assert.h> template< typename T > class gpu_array { T *_gpu_pointer; T *_cpu_pointer; public: size_t size; using element_type = T; gpu_array(size_t n): size(n) { assert(size > 0); // <-- Allocate and initialize on GPU CUDA_CALL(hipMalloc(&_gpu_pointer, n * sizeof(T))); hipLaunchKernelGGL(( init_array_kernel), dim3(128),dim3(32), 0, 0, _gpu_pointer, n); // <-- Allocate and initialize on CPU _cpu_pointer = (T *) malloc(n * sizeof(T)); for (int i=0; i<n; i++) { new (&_cpu_pointer[i]) T(); } } template <class InitializerT> gpu_array(size_t n, InitializerT init_fn) : gpu_array(n) { /* Instantiate with a function to initialize each value. Example: // Initialize to {0, 1, 2, 3, 4...9} auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); */ // Apply the initialization function to each element (*this).for_each(init_fn); } T *gpu_pointer() const { return _gpu_pointer; } gpu_array<T>& to_cpu() { CUDA_CALL(hipMemcpy( _cpu_pointer, _gpu_pointer, size*sizeof(T), hipMemcpyDeviceToHost )); return *this; } T *cpu_pointer() const { return _cpu_pointer; } T operator[](size_t idx) { return _cpu_pointer[idx]; } template <class TransformedT, class TransformationT> gpu_array<TransformedT> transform( TransformationT gpu_fn, int n_blocks = 1024, int n_threads = 32 ) { /* Create a new array with the transformed elements. 
Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Transform to pairs of the number and it's squares auto squares = array.transform<int2>( [] __device__ (int2 el, int idx) { return make_int2(el, el*el); }); */ auto transformed = gpu_array<TransformedT>{this->size}; hipLaunchKernelGGL(( transform_kernel), dim3(n_blocks), dim3(n_threads), 0, 0, _gpu_pointer, size, transformed.gpu_pointer(), gpu_fn ); return transformed; } template <class FunctionT> gpu_array<T>& for_each( FunctionT gpu_fn, int n_blocks = 1024, int n_threads = 32 ) {/* Apply the function in-place for each element of the array. Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Make a linear transformation int a = 12; int b = 34; array.for_each( [a,b] __device__ (int2 &el, int idx) { el = a*el + b; }); */ hipLaunchKernelGGL(( for_each_kernel), dim3(n_blocks), dim3(n_threads), 0, 0, _gpu_pointer, size, gpu_fn); return *this; } template <class ReductionT> gpu_object<T> reduce( ReductionT reduce_fn, int n_blocks = 128, int threads_per_block = 32 /* <-- Must be a power of 2! */ ) { /* Perform a reduction of the elements of the array using the provided function. 
Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Multipliy the elements in GPU gpu_object<int> product = array.reduce( []__device__ (int a, int b) { return a * b }); */ unsigned int shared_memory_size = threads_per_block * sizeof(T); auto block_partials = gpu_array<T>(n_blocks); hipLaunchKernelGGL(( reduce_2step_kernel), dim3(n_blocks), dim3(threads_per_block), shared_memory_size, 0, _gpu_pointer, size, block_partials.gpu_pointer(), reduce_fn ); auto out = gpu_object<T>(); hipLaunchKernelGGL(( reduce_2step_kernel), dim3(1), dim3(threads_per_block), shared_memory_size, 0, block_partials.gpu_pointer(), n_blocks, out.gpu_pointer(), reduce_fn ); return out; } // Iterator protocol T* begin() { return _cpu_pointer; } T* end() { return _cpu_pointer + size; } gpu_array<T> copy() { auto copied = gpu_array<T>(this->size); // Copy in GPU copied.for_each( [old_one=this->gpu_pointer()] __device__ (T &new_el, int i) { new_el = old_one[i]; }); // Copy in CPU for(int i=0; i<size; i++) { copied[i] = (*this)[i]; } return copied; } ~gpu_array() { free(_cpu_pointer); CUDA_CALL(hipFree(_gpu_pointer)); } }; /* ----------------------------------------------------------------------- The following is executable documentation as described in Kevlin Henney's talk "Structure and Interpretation of Test Cases" (https://youtu.be/tWn8RA_DEic) written using the doctest framework (https://github.com/onqtam/doctest). Run with `make test`. 
*/ #ifdef __TESTING__ #include "tests/doctest.h" #include <typeinfo> // operator typeid #include <assert.h> template<class T> struct pair { T first; T second; }; TEST_SUITE("GPU Array specification") { SCENARIO("GPU Array initialization") { GIVEN("A size and the type of the elements") { int size = 10; using element_t = int; THEN("A GPU array can be initialized without failure") { auto array = gpu_array<element_t>(size); using array_element_type = decltype(array)::element_type; CHECK(typeid(array_element_type) == typeid(element_t)); CHECK(array.size == size); } } } SCENARIO("GPU Array for_each") { GIVEN("A GPU array") { int size = 10; using element_t = int; auto array = gpu_array<element_t>(size); WHEN("It's elements are modified with for_each") { array.for_each( [] __device__ (element_t &el, int idx) { el = idx * idx; }); THEN("The values on GPU are changed accordingly") { array.for_each( // <-- check on GPU [] __device__ (element_t current_val, int idx){ assert(current_val == idx*idx); }); array.to_cpu(); // <-- check on CPU for(int i=0; i<array.size; i++){ CHECK(array[i] == i*i); } } } } } SCENARIO("GPU Array transformation") { GIVEN("A GPU array") { int size = 10; using element_t = int; // Initialize to {0, 1, 2, 3, 4...9} auto array = gpu_array<element_t>(size, []__device__ (element_t &el, int i) { el = i; }); WHEN("A new array is obtained as a transformation of it") { auto squares = array.transform<pair<element_t>>( [] __device__ (element_t &el, int idx) { return pair<element_t>{el, el*el}; }); THEN("The values on GPU are changed accordingly") { squares.for_each( // <-- check on GPU [] __device__ (pair<element_t> p, int idx) { assert(p.first == idx); assert(p.second == idx*idx); }); squares.to_cpu(); // <-- check on CPU for(int i=0; i<squares.size; i++){ CHECK(squares[i].first == i); CHECK(squares[i].second == i*i); } } } } } SCENARIO("GPU Array reduction") { GIVEN("A GPU array with arbitrary elements") { int size = 1000; using element_t = int; auto array = 
gpu_array<element_t>(size); array.for_each( [] __device__ (element_t &el, int i) { el = i+1; }); WHEN("A reducion operation is applied on it") { auto sum_gpu = array.reduce( [] __device__ (element_t reduced, element_t el) { return reduced + el; }).to_cpu(); THEN("The reduction on CPU yields the same result") { array.to_cpu(); // <-- check on CPU auto sum_cpu = array[0]; for(int i=1; i<array.size; i++){ sum_cpu += array[i]; } CHECK(sum_cpu == sum_gpu); } } } SUBCASE("Reduction on a very large array") { auto n = 500000; // Initialize to {0, 1, 2, 3, 4...n} auto nums = gpu_array<int>(n, [] __device__ (int &el, int idx) { el = idx; }); auto addition = [] __device__ (int a, int b) { return a + b; }; auto sum = nums.reduce(addition).to_cpu(); CHECK(sum == n*(n-1)/2); } } } #endif
82524fcd4b1c5365e0501eae833b2d904753988f.cu
#pragma once /* ## Clase `gpu_array` Un *smart pointer* para arreglos de objetos en el GPU. La contraparte del `gpu_object` pero para arreglos. La clase abstrae la alocación y liberación de memoria además de las operaciones de copia entre Host y Device. Esta es una abstracción del Host, por lo que no se puede utilizar en un kernel. */ #include "pcuditas/gpu/macros.cu" #include "pcuditas/gpu/kernels.cu" #include "pcuditas/gpu/gpu_object.cu" #include <assert.h> template< typename T > class gpu_array { T *_gpu_pointer; T *_cpu_pointer; public: size_t size; using element_type = T; gpu_array(size_t n): size(n) { assert(size > 0); // <-- Allocate and initialize on GPU CUDA_CALL(cudaMalloc(&_gpu_pointer, n * sizeof(T))); init_array_kernel<<<128,32>>>(_gpu_pointer, n); // <-- Allocate and initialize on CPU _cpu_pointer = (T *) malloc(n * sizeof(T)); for (int i=0; i<n; i++) { new (&_cpu_pointer[i]) T(); } } template <class InitializerT> gpu_array(size_t n, InitializerT init_fn) : gpu_array(n) { /* Instantiate with a function to initialize each value. Example: // Initialize to {0, 1, 2, 3, 4...9} auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); */ // Apply the initialization function to each element (*this).for_each(init_fn); } T *gpu_pointer() const { return _gpu_pointer; } gpu_array<T>& to_cpu() { CUDA_CALL(cudaMemcpy( _cpu_pointer, _gpu_pointer, size*sizeof(T), cudaMemcpyDeviceToHost )); return *this; } T *cpu_pointer() const { return _cpu_pointer; } T operator[](size_t idx) { return _cpu_pointer[idx]; } template <class TransformedT, class TransformationT> gpu_array<TransformedT> transform( TransformationT gpu_fn, int n_blocks = 1024, int n_threads = 32 ) { /* Create a new array with the transformed elements. 
Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Transform to pairs of the number and it's squares auto squares = array.transform<int2>( [] __device__ (int2 el, int idx) { return make_int2(el, el*el); }); */ auto transformed = gpu_array<TransformedT>{this->size}; transform_kernel<<<n_blocks, n_threads>>>( _gpu_pointer, size, transformed.gpu_pointer(), gpu_fn ); return transformed; } template <class FunctionT> gpu_array<T>& for_each( FunctionT gpu_fn, int n_blocks = 1024, int n_threads = 32 ) {/* Apply the function in-place for each element of the array. Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Make a linear transformation int a = 12; int b = 34; array.for_each( [a,b] __device__ (int2 &el, int idx) { el = a*el + b; }); */ for_each_kernel<<<n_blocks, n_threads>>>(_gpu_pointer, size, gpu_fn); return *this; } template <class ReductionT> gpu_object<T> reduce( ReductionT reduce_fn, int n_blocks = 128, int threads_per_block = 32 /* <-- Must be a power of 2! */ ) { /* Perform a reduction of the elements of the array using the provided function. 
Example: // Create gpu_array "array" with the numbers 0 to 9 auto array = gpu_array<int>(10, []__device__ (int &el, int i) { el = i; }); // Multipliy the elements in GPU gpu_object<int> product = array.reduce( []__device__ (int a, int b) { return a * b }); */ unsigned int shared_memory_size = threads_per_block * sizeof(T); auto block_partials = gpu_array<T>(n_blocks); reduce_2step_kernel<<<n_blocks, threads_per_block, shared_memory_size>>>( _gpu_pointer, size, block_partials.gpu_pointer(), reduce_fn ); auto out = gpu_object<T>(); reduce_2step_kernel<<<1, threads_per_block, shared_memory_size>>>( block_partials.gpu_pointer(), n_blocks, out.gpu_pointer(), reduce_fn ); return out; } // Iterator protocol T* begin() { return _cpu_pointer; } T* end() { return _cpu_pointer + size; } gpu_array<T> copy() { auto copied = gpu_array<T>(this->size); // Copy in GPU copied.for_each( [old_one=this->gpu_pointer()] __device__ (T &new_el, int i) { new_el = old_one[i]; }); // Copy in CPU for(int i=0; i<size; i++) { copied[i] = (*this)[i]; } return copied; } ~gpu_array() { free(_cpu_pointer); CUDA_CALL(cudaFree(_gpu_pointer)); } }; /* ----------------------------------------------------------------------- The following is executable documentation as described in Kevlin Henney's talk "Structure and Interpretation of Test Cases" (https://youtu.be/tWn8RA_DEic) written using the doctest framework (https://github.com/onqtam/doctest). Run with `make test`. 
*/ #ifdef __TESTING__ #include "tests/doctest.h" #include <typeinfo> // operator typeid #include <assert.h> template<class T> struct pair { T first; T second; }; TEST_SUITE("GPU Array specification") { SCENARIO("GPU Array initialization") { GIVEN("A size and the type of the elements") { int size = 10; using element_t = int; THEN("A GPU array can be initialized without failure") { auto array = gpu_array<element_t>(size); using array_element_type = decltype(array)::element_type; CHECK(typeid(array_element_type) == typeid(element_t)); CHECK(array.size == size); } } } SCENARIO("GPU Array for_each") { GIVEN("A GPU array") { int size = 10; using element_t = int; auto array = gpu_array<element_t>(size); WHEN("It's elements are modified with for_each") { array.for_each( [] __device__ (element_t &el, int idx) { el = idx * idx; }); THEN("The values on GPU are changed accordingly") { array.for_each( // <-- check on GPU [] __device__ (element_t current_val, int idx){ assert(current_val == idx*idx); }); array.to_cpu(); // <-- check on CPU for(int i=0; i<array.size; i++){ CHECK(array[i] == i*i); } } } } } SCENARIO("GPU Array transformation") { GIVEN("A GPU array") { int size = 10; using element_t = int; // Initialize to {0, 1, 2, 3, 4...9} auto array = gpu_array<element_t>(size, []__device__ (element_t &el, int i) { el = i; }); WHEN("A new array is obtained as a transformation of it") { auto squares = array.transform<pair<element_t>>( [] __device__ (element_t &el, int idx) { return pair<element_t>{el, el*el}; }); THEN("The values on GPU are changed accordingly") { squares.for_each( // <-- check on GPU [] __device__ (pair<element_t> p, int idx) { assert(p.first == idx); assert(p.second == idx*idx); }); squares.to_cpu(); // <-- check on CPU for(int i=0; i<squares.size; i++){ CHECK(squares[i].first == i); CHECK(squares[i].second == i*i); } } } } } SCENARIO("GPU Array reduction") { GIVEN("A GPU array with arbitrary elements") { int size = 1000; using element_t = int; auto array = 
gpu_array<element_t>(size); array.for_each( [] __device__ (element_t &el, int i) { el = i+1; }); WHEN("A reducion operation is applied on it") { auto sum_gpu = array.reduce( [] __device__ (element_t reduced, element_t el) { return reduced + el; }).to_cpu(); THEN("The reduction on CPU yields the same result") { array.to_cpu(); // <-- check on CPU auto sum_cpu = array[0]; for(int i=1; i<array.size; i++){ sum_cpu += array[i]; } CHECK(sum_cpu == sum_gpu); } } } SUBCASE("Reduction on a very large array") { auto n = 500000; // Initialize to {0, 1, 2, 3, 4...n} auto nums = gpu_array<int>(n, [] __device__ (int &el, int idx) { el = idx; }); auto addition = [] __device__ (int a, int b) { return a + b; }; auto sum = nums.reduce(addition).to_cpu(); CHECK(sum == n*(n-1)/2); } } } #endif
dae6445a48a8ca3e273824599ee81b2b6cf9790f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <algorithm> #include <iostream> #include <metrics/contingencyMatrix.cuh> #include <metrics/mutual_info_score.cuh> #include <raft/mr/device/allocator.hpp> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { // parameter structure definition struct mutualInfoParam { int nElements; int lowerLabelRange; int upperLabelRange; bool sameArrays; double tolerance; }; // test fixture class template <typename T> class mutualInfoTest : public ::testing::TestWithParam<mutualInfoParam> { protected: // the constructor void SetUp() override { // getting the parameters params = ::testing::TestWithParam<mutualInfoParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; // generating random value test input std::vector<int> arr1(nElements, 0); std::vector<int> arr2(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); if (params.sameArrays) { arr2 = arr1; } else { std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); }); } // generating the golden output // calculating the 
contingency matrix int numUniqueClasses = upperLabelRange - lowerLabelRange + 1; size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int); int* hGoldenOutput = (int*)malloc(sizeOfMat); memset(hGoldenOutput, 0, sizeOfMat); int i, j; for (i = 0; i < nElements; i++) { int row = arr1[i] - lowerLabelRange; int column = arr2[i] - lowerLabelRange; hGoldenOutput[row * numUniqueClasses + column] += 1; } int* a = (int*)malloc(numUniqueClasses * sizeof(int)); int* b = (int*)malloc(numUniqueClasses * sizeof(int)); memset(a, 0, numUniqueClasses * sizeof(int)); memset(b, 0, numUniqueClasses * sizeof(int)); // and also the reducing contingency matrix along row and column for (i = 0; i < numUniqueClasses; ++i) { for (j = 0; j < numUniqueClasses; ++j) { a[i] += hGoldenOutput[i * numUniqueClasses + j]; b[i] += hGoldenOutput[j * numUniqueClasses + i]; } } // calculating the truth mutual information for (int i = 0; i < numUniqueClasses; ++i) { for (int j = 0; j < numUniqueClasses; ++j) { if (a[i] * b[j] != 0 && hGoldenOutput[i * numUniqueClasses + j] != 0) { truthmutualInfo += (double)(hGoldenOutput[i * numUniqueClasses + j]) * (log((double)(double(nElements) * hGoldenOutput[i * numUniqueClasses + j])) - log((double)(a[i] * b[j]))); } } } truthmutualInfo /= nElements; // allocating and initializing memory to the GPU CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(firstClusterArray, nElements, true); raft::allocate(secondClusterArray, nElements, true); raft::update_device(firstClusterArray, &arr1[0], (int)nElements, stream); raft::update_device(secondClusterArray, &arr2[0], (int)nElements, stream); std::shared_ptr<raft::mr::device::allocator> allocator(new raft::mr::device::default_allocator); // calling the mutualInfo CUDA implementation computedmutualInfo = MLCommon::Metrics::mutual_info_score(firstClusterArray, secondClusterArray, nElements, lowerLabelRange, upperLabelRange, allocator, stream); } // the destructor void TearDown() override { 
CUDA_CHECK(hipFree(firstClusterArray)); CUDA_CHECK(hipFree(secondClusterArray)); CUDA_CHECK(hipStreamDestroy(stream)); } // declaring the data values mutualInfoParam params; T lowerLabelRange, upperLabelRange; T* firstClusterArray = nullptr; T* secondClusterArray = nullptr; int nElements = 0; double truthmutualInfo = 0; double computedmutualInfo = 0; hipStream_t stream; }; // setting test parameter values const std::vector<mutualInfoParam> inputs = {{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001}, {100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001}, {198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001}, {199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001}, {100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001}, {198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}}; // writing the test suite typedef mutualInfoTest<int> mutualInfoTestClass; TEST_P(mutualInfoTestClass, Result) { ASSERT_NEAR(computedmutualInfo, truthmutualInfo, params.tolerance); } INSTANTIATE_TEST_CASE_P(mutualInfo, mutualInfoTestClass, ::testing::ValuesIn(inputs)); } // end namespace Metrics } // end namespace MLCommon
dae6445a48a8ca3e273824599ee81b2b6cf9790f.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <algorithm> #include <iostream> #include <metrics/contingencyMatrix.cuh> #include <metrics/mutual_info_score.cuh> #include <raft/mr/device/allocator.hpp> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { // parameter structure definition struct mutualInfoParam { int nElements; int lowerLabelRange; int upperLabelRange; bool sameArrays; double tolerance; }; // test fixture class template <typename T> class mutualInfoTest : public ::testing::TestWithParam<mutualInfoParam> { protected: // the constructor void SetUp() override { // getting the parameters params = ::testing::TestWithParam<mutualInfoParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; // generating random value test input std::vector<int> arr1(nElements, 0); std::vector<int> arr2(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); if (params.sameArrays) { arr2 = arr1; } else { std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); }); } // generating the golden output // calculating the contingency matrix int numUniqueClasses = upperLabelRange - 
lowerLabelRange + 1; size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int); int* hGoldenOutput = (int*)malloc(sizeOfMat); memset(hGoldenOutput, 0, sizeOfMat); int i, j; for (i = 0; i < nElements; i++) { int row = arr1[i] - lowerLabelRange; int column = arr2[i] - lowerLabelRange; hGoldenOutput[row * numUniqueClasses + column] += 1; } int* a = (int*)malloc(numUniqueClasses * sizeof(int)); int* b = (int*)malloc(numUniqueClasses * sizeof(int)); memset(a, 0, numUniqueClasses * sizeof(int)); memset(b, 0, numUniqueClasses * sizeof(int)); // and also the reducing contingency matrix along row and column for (i = 0; i < numUniqueClasses; ++i) { for (j = 0; j < numUniqueClasses; ++j) { a[i] += hGoldenOutput[i * numUniqueClasses + j]; b[i] += hGoldenOutput[j * numUniqueClasses + i]; } } // calculating the truth mutual information for (int i = 0; i < numUniqueClasses; ++i) { for (int j = 0; j < numUniqueClasses; ++j) { if (a[i] * b[j] != 0 && hGoldenOutput[i * numUniqueClasses + j] != 0) { truthmutualInfo += (double)(hGoldenOutput[i * numUniqueClasses + j]) * (log((double)(double(nElements) * hGoldenOutput[i * numUniqueClasses + j])) - log((double)(a[i] * b[j]))); } } } truthmutualInfo /= nElements; // allocating and initializing memory to the GPU CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(firstClusterArray, nElements, true); raft::allocate(secondClusterArray, nElements, true); raft::update_device(firstClusterArray, &arr1[0], (int)nElements, stream); raft::update_device(secondClusterArray, &arr2[0], (int)nElements, stream); std::shared_ptr<raft::mr::device::allocator> allocator(new raft::mr::device::default_allocator); // calling the mutualInfo CUDA implementation computedmutualInfo = MLCommon::Metrics::mutual_info_score(firstClusterArray, secondClusterArray, nElements, lowerLabelRange, upperLabelRange, allocator, stream); } // the destructor void TearDown() override { CUDA_CHECK(cudaFree(firstClusterArray)); CUDA_CHECK(cudaFree(secondClusterArray)); 
CUDA_CHECK(cudaStreamDestroy(stream)); } // declaring the data values mutualInfoParam params; T lowerLabelRange, upperLabelRange; T* firstClusterArray = nullptr; T* secondClusterArray = nullptr; int nElements = 0; double truthmutualInfo = 0; double computedmutualInfo = 0; cudaStream_t stream; }; // setting test parameter values const std::vector<mutualInfoParam> inputs = {{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001}, {100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001}, {198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001}, {199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001}, {100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001}, {198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}}; // writing the test suite typedef mutualInfoTest<int> mutualInfoTestClass; TEST_P(mutualInfoTestClass, Result) { ASSERT_NEAR(computedmutualInfo, truthmutualInfo, params.tolerance); } INSTANTIATE_TEST_CASE_P(mutualInfo, mutualInfoTestClass, ::testing::ValuesIn(inputs)); } // end namespace Metrics } // end namespace MLCommon
b30977e76b1793b04b73f4845361e1ccf29cdf7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void prefSumBinTreeCuda(float *a, int n) { __shared__ float shm[CUDA_THREAD_NUM]; int tid=threadIdx.x; int dot=2;//depth of tree if((tid+1)%dot==0) { shm[tid]=a[tid]+a[tid-1]; } dot*=2; __syncthreads(); while(dot<=n) { if((tid+1)%dot==0) { shm[tid]=shm[tid]+shm[tid-dot/2]; } dot*=2; __syncthreads(); } dot/=2; while(dot>2) { if((tid+1)%dot==0) { if((tid+1)/dot!=1) { shm[tid-dot/2]=shm[tid-dot/2]+shm[tid-dot]; } } dot/=2; __syncthreads(); } if((tid+1)%2==0) { a[tid]=shm[tid]; } else if(tid>0) { a[tid]=a[tid]+shm[tid-1]; } }
b30977e76b1793b04b73f4845361e1ccf29cdf7f.cu
#include "includes.h" __global__ void prefSumBinTreeCuda(float *a, int n) { __shared__ float shm[CUDA_THREAD_NUM]; int tid=threadIdx.x; int dot=2;//depth of tree if((tid+1)%dot==0) { shm[tid]=a[tid]+a[tid-1]; } dot*=2; __syncthreads(); while(dot<=n) { if((tid+1)%dot==0) { shm[tid]=shm[tid]+shm[tid-dot/2]; } dot*=2; __syncthreads(); } dot/=2; while(dot>2) { if((tid+1)%dot==0) { if((tid+1)/dot!=1) { shm[tid-dot/2]=shm[tid-dot/2]+shm[tid-dot]; } } dot/=2; __syncthreads(); } if((tid+1)%2==0) { a[tid]=shm[tid]; } else if(tid>0) { a[tid]=a[tid]+shm[tid-1]; } }
579780736dfd0b26ba538c9b18554f5f20d367f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void initializeBiasKernel_softmax(float* b, int size){ int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size){ b[index] = 0.0; } }
579780736dfd0b26ba538c9b18554f5f20d367f7.cu
#include "includes.h" __global__ void initializeBiasKernel_softmax(float* b, int size){ int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < size){ b[index] = 0.0; } }
9e29880d5c51076c579eea6dcc0afb2d82976b48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } #ifdef SAMPLE_FLOATS if(this->phase_ == TRAIN && this->sample_iter_) { sample_blob(top[0]->gpu_data(), top[0]->count(), this->activation_exp, this->activation_frac, this->activation, this->activation_vector, SAMPLING_FREQ); } #endif } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = 
bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } #ifdef SAMPLE_FLOATS if (this->phase_ == TRAIN && this->sample_iter_) { sample_blob(bottom[0]->gpu_diff(), bottom[0]->count(), this->activation_gradient_exp, this->activation_gradient_frac, this->activation_gradient, this->activation_gradient_vector, SAMPLING_FREQ); } #endif } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
9e29880d5c51076c579eea6dcc0afb2d82976b48.cu
#include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } #ifdef SAMPLE_FLOATS if(this->phase_ == TRAIN && this->sample_iter_) { sample_blob(top[0]->gpu_data(), top[0]->count(), this->activation_exp, this->activation_frac, this->activation, this->activation_vector, SAMPLING_FREQ); } #endif } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned 
int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } #ifdef SAMPLE_FLOATS if (this->phase_ == TRAIN && this->sample_iter_) { sample_blob(bottom[0]->gpu_diff(), bottom[0]->count(), this->activation_gradient_exp, this->activation_gradient_frac, this->activation_gradient, this->activation_gradient_vector, SAMPLING_FREQ); } #endif } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
0741202bf653a86b50b6f802299cdc6900382e0c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <algorithm> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" // ------------------------------------------------- // Forward // ------------------------------------------------- template <int N=6, int M=16, int MAX_NODE_UNIT> __global__ void kernal_fp32_MicroMlp_Forward ( float const *x_buf, float *y_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float *y_ptr; if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // for ( int i = 0; i < N; ++i ) { int in_idx = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * in_idx]; } // y_ptr = &y_buf[frame_stride * node]; } __syncthreads(); // 1SM1node for ( int frame = id; frame < frame_size; frame += id_step ) { if ( node < node_size ) { // float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // y_ptr[frame] = sig1; } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Forward ( float 
const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int frame_stride, hipStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 512; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT>), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_fp32_MicroMlp6x16_Forward ( float const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int frame_stride, hipStream_t streamId ) { return bbcu_fp32_MicroMlp_Forward<6, 16>( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } ///////////////////// // bit template <int N=6, int M=16, int MAX_NODE_UNIT=16> __global__ void kernal_bit_fp32_MicroMlp_Forward( int const *x_buf, float *y_buf, int const *input_index, 
float const *hidden_W, float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int input_frame_stride, int output_frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ int const *x_ptr[N][MAX_NODE_UNIT]; if ( node < node_size) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[input_frame_stride * input_node]; } } __syncthreads(); if ( node < node_size) { // float *y_ptr = &y_buf[output_frame_stride * node]; // 1SM1node for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { sig0 += W0[i][j][node_id]; } } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // y_ptr[frame] = sig1; } } } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, hipStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 512; 
unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_bit_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT>), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, input_frame_stride, output_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_bit_fp32_MicroMlp6x16_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, hipStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Forward<6, 16> ( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, input_frame_stride, output_frame_stride, streamId ); } // ------------------------------------------------- // Backward // ------------------------------------------------- __device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf) { buf[threadIdx.x] = v; __syncthreads(); // int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } float sum = buf[0]; 
__syncthreads(); return sum; } // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_fp32_MicroMlp_Backward ( float const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; __shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float db1_prev[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 
1SM1node for ( int frame = id; frame < frame_size; frame += id_step ) { // float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // 12 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { dW0[i][j] += grad0[i] * x[j]; dx[j] += grad0[i] * W0[i][j][node_id]; } } // for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); // for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6> __global__ void kernal_fp32_MicroMlp_BackwardMarge ( float const *src_buf, float *dst_buf, int const *input_index, int node_size, int frame_size, int frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; for ( int node = 0; node < node_size; ++node ) { if ( frame < frame_size ) { for ( int n = 0; n < N; ++n ) { int in_idx = input_index[node*N + n]; float* dst_buf_ptr = &dst_buf[frame_stride * in_idx]; 
float prev_data = dst_buf_ptr[frame]; const float* src_buf_ptr = &src_buf[(N * node + n) * frame_stride]; dst_buf_ptr[frame] = prev_data + src_buf_ptr[frame]; } } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Backward ( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, hipStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT>), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(hipMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); 
hipLaunchKernelGGL(( kernal_fp32_MicroMlp_BackwardMarge<N>), dim3(grid), dim3(block), 0, 0, dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_fp32_MicroMlp6x16_Backward( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, hipStream_t streamId ) { return bbcu_fp32_MicroMlp_Backward<6, 16>( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } /////////////////////////////// // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_bit_fp32_MicroMlp_Backward ( int const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int x_frame_stride, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; __shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float db1_prev[MAX_NODE_UNIT]; __shared__ int const 
*x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[x_frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 1SM1node for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // 12 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { sig0 += W0[i][j][node_id]; } } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { dW0[i][j] += grad0[i]; } dx[j] += grad0[i] * W0[i][j][node_id]; } } // for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); 
// for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, hipStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_bit_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT>), dim3(grid), dim3(block), 
0, streamId, dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, x_frame_stride, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(hipMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); hipLaunchKernelGGL(( kernal_fp32_MicroMlp_BackwardMarge<N>), dim3(grid), dim3(block), 0, 0, dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_bit_fp32_MicroMlp6x16_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, hipStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Backward<6, 16> ( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, x_frame_stride, frame_stride, streamId ); } // end of file
0741202bf653a86b50b6f802299cdc6900382e0c.cu
#include <iostream> #include <algorithm> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" // ------------------------------------------------- // Forward // ------------------------------------------------- template <int N=6, int M=16, int MAX_NODE_UNIT> __global__ void kernal_fp32_MicroMlp_Forward ( float const *x_buf, float *y_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // 係数読み込み __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float *y_ptr; if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // 読み込みアドレス for ( int i = 0; i < N; ++i ) { int in_idx = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * in_idx]; } // 書き込みアドレス y_ptr = &y_buf[frame_stride * node]; } __syncthreads(); // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { if ( node < node_size ) { // 入力データ読み込み float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // 計算 float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // 出力 y_ptr[frame] = sig1; } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Forward ( float const 
*dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 512; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_fp32_MicroMlp6x16_Forward ( float const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { return bbcu_fp32_MicroMlp_Forward<6, 16>( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } ///////////////////// // bit入力版 template <int N=6, int M=16, int MAX_NODE_UNIT=16> __global__ void kernal_bit_fp32_MicroMlp_Forward( int const *x_buf, float *y_buf, int const *input_index, float const *hidden_W, 
float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int input_frame_stride, int output_frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // 係数読み込み __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ int const *x_ptr[N][MAX_NODE_UNIT]; if ( node < node_size) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // 読み込みアドレス for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[input_frame_stride * input_node]; } } __syncthreads(); if ( node < node_size) { // 書き込みアドレス float *y_ptr = &y_buf[output_frame_stride * node]; // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // 入力データ読み込み int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // 計算 float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { sig0 += W0[i][j][node_id]; } } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // 出力 y_ptr[frame] = sig1; } } } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned 
int const THREAD_SIZE = 512; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_bit_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, input_frame_stride, output_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_bit_fp32_MicroMlp6x16_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, cudaStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Forward<6, 16> ( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, input_frame_stride, output_frame_stride, streamId ); } // ------------------------------------------------- // Backward // ------------------------------------------------- __device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf) { buf[threadIdx.x] = v; __syncthreads(); // スレッド間集計 int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } 
float sum = buf[0]; __syncthreads(); return sum; } // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_fp32_MicroMlp_Backward ( float const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; __shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float db1_prev[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // 係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // 直前の係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // ポインタ読み込み for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // 勾配初期化 float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; 
dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { // 入力データ読み込み float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // 1段目再計算して2段目逆伝播 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1段目逆伝播 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { dW0[i][j] += grad0[i] * x[j]; dx[j] += grad0[i] * W0[i][j][node_id]; } } // 誤差書き込み for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); // 係数統合 for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // 勾配出力 if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6> __global__ void kernal_fp32_MicroMlp_BackwardMarge ( float const *src_buf, float *dst_buf, int const *input_index, int node_size, int frame_size, int frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; for ( int node = 0; node < node_size; ++node ) { if ( frame < frame_size ) { for ( int n = 0; n 
< N; ++n ) { int in_idx = input_index[node*N + n]; float* dst_buf_ptr = &dst_buf[frame_stride * in_idx]; float prev_data = dst_buf_ptr[frame]; const float* src_buf_ptr = &src_buf[(N * node + n) * frame_stride]; dst_buf_ptr[frame] = prev_data + src_buf_ptr[frame]; } } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Backward ( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(cudaMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 
2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); kernal_fp32_MicroMlp_BackwardMarge<N><<<grid, block>>> ( dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_fp32_MicroMlp6x16_Backward( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { return bbcu_fp32_MicroMlp_Backward<6, 16>( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } /////////////////////////////// // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_bit_fp32_MicroMlp_Backward ( int const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int x_frame_stride, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; __shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float 
db1_prev[MAX_NODE_UNIT]; __shared__ int const *x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // 係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // 直前の係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // ポインタ読み込み for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[x_frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // 勾配初期化 float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // 入力データ読み込み int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // 1段目再計算して2段目逆伝播 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { sig0 += W0[i][j][node_id]; } } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1段目逆伝播 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { if ( x[j] & bit ) { dW0[i][j] += grad0[i]; } dx[j] += grad0[i] * 
W0[i][j][node_id]; } } // 誤差書き込み for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); // 係数統合 for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // 勾配出力 if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + 
(block.y - 1)) / block.y); kernal_bit_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, x_frame_stride, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(cudaMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); kernal_fp32_MicroMlp_BackwardMarge<N><<<grid, block>>> ( dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_bit_fp32_MicroMlp6x16_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, cudaStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Backward<6, 16> ( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, x_frame_stride, frame_stride, streamId ); } // end of file
f7df5c807940ad70937c46d12631bc17953ef483.hip
// !!! This is a file automatically generated by hipify!!! // Using CUDA device to calculate pi #include <stdio.h> #include <hip/hip_runtime.h> #define NBIN 10000000 // Number of bins #define NUM_BLOCK 30 // Number of thread blocks #define NUM_THREAD 8 // Number of threads per block int tid; float pi = 0; // Kernel that executes on the CUDA device __global__ void cal_pi(float *sum, int nbin, float step, int nthreads, int nblocks) { int i; float x; int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks for (i=idx; i< nbin; i+=nthreads*nblocks) { x = (i+0.5)*step; sum[idx] += 4.0/(1.0+x*x); } } // Main routine that executes on the host int main(void) { dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions float *sumHost, *sumDev; // Pointer to host & device arrays float step = 1.0/NBIN; // Step size size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float); //Array memory size sumHost = (float *)malloc(size); // Allocate array on host hipMalloc((void **) &sumDev, size); // Allocate array on device // Initialize array in device to 0 hipMemset(sumDev, 0, size); // Do calculation on device hipLaunchKernelGGL(( cal_pi) , dim3(dimGrid), dim3(dimBlock), 0, 0, sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel // Retrieve result from device and store it in host array hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost); for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++) pi += sumHost[tid]; pi *= step; // Print results printf("PI = %f\n",pi); // Cleanup free(sumHost); hipFree(sumDev); return 0; }
f7df5c807940ad70937c46d12631bc17953ef483.cu
// Using CUDA device to calculate pi #include <stdio.h> #include <cuda.h> #define NBIN 10000000 // Number of bins #define NUM_BLOCK 30 // Number of thread blocks #define NUM_THREAD 8 // Number of threads per block int tid; float pi = 0; // Kernel that executes on the CUDA device __global__ void cal_pi(float *sum, int nbin, float step, int nthreads, int nblocks) { int i; float x; int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks for (i=idx; i< nbin; i+=nthreads*nblocks) { x = (i+0.5)*step; sum[idx] += 4.0/(1.0+x*x); } } // Main routine that executes on the host int main(void) { dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions float *sumHost, *sumDev; // Pointer to host & device arrays float step = 1.0/NBIN; // Step size size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float); //Array memory size sumHost = (float *)malloc(size); // Allocate array on host cudaMalloc((void **) &sumDev, size); // Allocate array on device // Initialize array in device to 0 cudaMemset(sumDev, 0, size); // Do calculation on device cal_pi <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel // Retrieve result from device and store it in host array cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost); for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++) pi += sumHost[tid]; pi *= step; // Print results printf("PI = %f\n",pi); // Cleanup free(sumHost); cudaFree(sumDev); return 0; }
62a5bf7224529fea43e0cd085e4d8408000b5746.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void mult_matrix_kernel_simple(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { // unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; // unsigned int row = by * blockDim.y + ty; unsigned int col = bx * blockDim.x + tx; if (row < N && col < M) { float sum = 0.0f; for (unsigned int i = 0; i < L; ++i) sum += A[row*L + i]*B[i*M + col]; C[row*M + col] = sum; } } __global__ void mult_matrix_kernel_shared(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { // __shared__ float ds_A[16][16]; __shared__ float ds_B[16][16]; // unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; // unsigned int row = by * blockDim.y + ty; unsigned int col = bx * blockDim.x + tx; float sum = 0; unsigned int nCycles = (L - 1)/16 + 1; for (int i = 0; i < nCycles; ++i) { // A if (row < N && i*blockDim.x + tx < L) ds_A[ty][tx] = A[row*N + i*blockDim.x + tx]; else ds_A[ty][tx] = 0; // B if (i*blockDim.x + ty < L && col < M) ds_B[ty][tx] = B[(i*blockDim.x + ty)*M + col]; else ds_B[ty][tx] = 0; __syncthreads(); // for (int j = 0; j < blockDim.x; ++j) sum += ds_A[ty][j] * ds_B[j][tx]; __syncthreads(); } // if (row < N && col < M) C[row*M + col] = sum; } extern "C" void multMatrixSimple(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { dim3 BlockDim(16, 16, 1); dim3 GridDim((N - 1)/BlockDim.x + 1, (M - 1)/BlockDim.y + 1, 1); hipLaunchKernelGGL(( mult_matrix_kernel_simple), dim3(GridDim), dim3(BlockDim), 0, 0, A, B, C, N, L, M); } extern "C" void multMatrixShared(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { dim3 BlockDim(16, 16, 1); dim3 GridDim((N - 1)/BlockDim.x + 1, (M - 1)/BlockDim.y + 1, 1); hipLaunchKernelGGL(( mult_matrix_kernel_shared), 
dim3(GridDim), dim3(BlockDim), 0, 0, A, B, C, N, L, M); }
62a5bf7224529fea43e0cd085e4d8408000b5746.cu
__global__ void mult_matrix_kernel_simple(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { // определяем место потока в массиве unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; // определяем рассчетный элемент матрицы unsigned int row = by * blockDim.y + ty; unsigned int col = bx * blockDim.x + tx; if (row < N && col < M) { float sum = 0.0f; for (unsigned int i = 0; i < L; ++i) sum += A[row*L + i]*B[i*M + col]; C[row*M + col] = sum; } } __global__ void mult_matrix_kernel_shared(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { // разделяемая память для хранения блока элементов исходных матриц __shared__ float ds_A[16][16]; __shared__ float ds_B[16][16]; // определяем место потока в массиве unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; // определяем рассчетный элемент матрицы unsigned int row = by * blockDim.y + ty; unsigned int col = bx * blockDim.x + tx; float sum = 0; unsigned int nCycles = (L - 1)/16 + 1; for (int i = 0; i < nCycles; ++i) { // копируем элемент исходной матрицы A if (row < N && i*blockDim.x + tx < L) ds_A[ty][tx] = A[row*N + i*blockDim.x + tx]; else ds_A[ty][tx] = 0; // копируем элемент исходной матрицы B if (i*blockDim.x + ty < L && col < M) ds_B[ty][tx] = B[(i*blockDim.x + ty)*M + col]; else ds_B[ty][tx] = 0; __syncthreads(); // накопление частичной суммы for (int j = 0; j < blockDim.x; ++j) sum += ds_A[ty][j] * ds_B[j][tx]; __syncthreads(); } // запись элемента результирующей матрицы if (row < N && col < M) C[row*M + col] = sum; } extern "C" void multMatrixSimple(float *A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { dim3 BlockDim(16, 16, 1); dim3 GridDim((N - 1)/BlockDim.x + 1, (M - 1)/BlockDim.y + 1, 1); mult_matrix_kernel_simple<<<GridDim, BlockDim>>>(A, B, C, N, L, M); } extern "C" void multMatrixShared(float 
*A, float *B, float *C, unsigned int N, unsigned int L, unsigned int M) { dim3 BlockDim(16, 16, 1); dim3 GridDim((N - 1)/BlockDim.x + 1, (M - 1)/BlockDim.y + 1, 1); mult_matrix_kernel_shared<<<GridDim, BlockDim>>>(A, B, C, N, L, M); }